[llvm] d3b0fba - Revert rG0b0a38a7a229b70d7261771ba0e702843bd34e97 : "[X86] combineX86ShufflesRecursively - don't widen shuffle subvector inputs"

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 13 05:29:11 PST 2023


Author: Simon Pilgrim
Date: 2023-02-13T13:28:48Z
New Revision: d3b0fba6084d800ff37432705573c1b0318c7f06

URL: https://github.com/llvm/llvm-project/commit/d3b0fba6084d800ff37432705573c1b0318c7f06
DIFF: https://github.com/llvm/llvm-project/commit/d3b0fba6084d800ff37432705573c1b0318c7f06.diff

LOG: Revert rG0b0a38a7a229b70d7261771ba0e702843bd34e97 : "[X86] combineX86ShufflesRecursively - don't widen shuffle subvector inputs"

Reports of miscompiles that I'm still trying to triage - reverting for now

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
    llvm/test/CodeGen/X86/avx-insertelt.ll
    llvm/test/CodeGen/X86/avx-vperm2x128.ll
    llvm/test/CodeGen/X86/avx512-insert-extract.ll
    llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
    llvm/test/CodeGen/X86/horizontal-sum.ll
    llvm/test/CodeGen/X86/matrix-multiply.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-8.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-8.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
    llvm/test/CodeGen/X86/vector-pack-512.ll
    llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
    llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
    llvm/test/CodeGen/X86/vector-shuffle-512-v64.ll
    llvm/test/CodeGen/X86/vector-shuffle-combining.ll
    llvm/test/CodeGen/X86/vector-shuffle-v192.ll
    llvm/test/CodeGen/X86/vselect-avx.ll
    llvm/test/CodeGen/X86/widen_fadd.ll
    llvm/test/CodeGen/X86/widen_fdiv.ll
    llvm/test/CodeGen/X86/widen_fmul.ll
    llvm/test/CodeGen/X86/widen_fsub.ll
    llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll
    llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
    llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 03d4fdfd10358..72c3bfd821c8b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -8452,16 +8452,6 @@ static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
       Ops.push_back(Sub.getOperand(0));
       return true;
     }
-    // Handle generic INSERT_SUBVECTOR(SRC, SUB) iff its not a widening node.
-    if (!Src.isUndef() || InsertIdx != 0) {
-      Mask.assign(NumElts, SM_SentinelUndef);
-      std::iota(Mask.begin(), Mask.end(), 0);
-      for (int i = 0; i != (int)NumSubElts; ++i)
-        Mask[InsertIdx + i] = NumElts + i;
-      Ops.push_back(Src);
-      Ops.push_back(Sub);
-      return true;
-    }
     // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
     SmallVector<int, 64> SubMask;
     SmallVector<SDValue, 2> SubInputs;
@@ -40581,6 +40571,21 @@ static SDValue combineX86ShufflesRecursively(
                 Op, OpScaledDemandedElts, DAG))
       Op = NewOp;
   }
+  // FIXME: should we rerun resolveTargetShuffleInputsAndMask() now?
+
+  // Widen any subvector shuffle inputs we've collected.
+  // TODO: Remove this to avoid generating temporary nodes, we should only
+  // widen once combineX86ShuffleChain has found a match.
+  if (any_of(Ops, [RootSizeInBits](SDValue Op) {
+        return Op.getValueSizeInBits() < RootSizeInBits;
+      })) {
+    for (SDValue &Op : Ops)
+      if (Op.getValueSizeInBits() < RootSizeInBits)
+        Op = widenSubVector(Op, false, Subtarget, DAG, SDLoc(Op),
+                            RootSizeInBits);
+    // Reresolve - we might have repeated subvector sources.
+    resolveTargetShuffleInputsAndMask(Ops, Mask);
+  }
 
   // We can only combine unary and binary shuffle mask cases.
   if (Ops.size() <= 2) {
@@ -55540,16 +55545,6 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
 
     unsigned NumOps = Ops.size();
     switch (Op0.getOpcode()) {
-    case ISD::AssertSext:
-    case ISD::AssertZext: {
-      if (!IsSplat && llvm::all_of(Ops, [&Op0](SDValue Op) {
-            return Op0.getOperand(1) == Op.getOperand(1);
-          })) {
-        return DAG.getNode(Op0.getOpcode(), DL, VT,
-                           ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
-      }
-      break;
-    }
     case X86ISD::VBROADCAST: {
       if (!IsSplat && llvm::all_of(Ops, [](SDValue Op) {
             return Op.getOperand(0).getValueType().is128BitVector();

diff  --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
index 905d022696679..59170e99521e5 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
@@ -2031,9 +2031,9 @@ define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in.
 ; AVX512BW-FAST-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4:
 ; AVX512BW-FAST:       # %bb.0:
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,9,0,11,0,13,0,15]
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,25,0,27,0,29,0,31]
 ; AVX512BW-FAST-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vpermd %zmm0, %zmm1, %zmm0
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm1, %zmm0
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
@@ -4274,15 +4274,16 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX512F-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,21,0,23]
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm0
-; AVX512F-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
-; AVX512F-NEXT:    vpaddb (%rdx), %ymm2, %ymm1
-; AVX512F-NEXT:    vmovdqa %ymm1, (%rcx)
-; AVX512F-NEXT:    vmovdqa %ymm0, 32(%rcx)
+; AVX512F-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,13,0,15]
+; AVX512F-NEXT:    vpermd %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
+; AVX512F-NEXT:    vpaddb 32(%rdx), %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa %ymm0, (%rcx)
+; AVX512F-NEXT:    vmovdqa %ymm1, 32(%rcx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
@@ -4290,15 +4291,16 @@ define void @vec384_i32_widen_to_i64_factor2_broadcast_to_v6i64_factor6(ptr %in.
 ; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512DQ-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX512DQ-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,21,0,23]
-; AVX512DQ-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm2, %ymm0
-; AVX512DQ-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
-; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm2, %ymm1
-; AVX512DQ-NEXT:    vmovdqa %ymm1, (%rcx)
-; AVX512DQ-NEXT:    vmovdqa %ymm0, 32(%rcx)
+; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,13,0,15]
+; AVX512DQ-NEXT:    vpermd %zmm0, %zmm1, %zmm0
+; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
+; AVX512DQ-NEXT:    vpaddb 32(%rdx), %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
+; AVX512DQ-NEXT:    vmovdqa %ymm0, (%rcx)
+; AVX512DQ-NEXT:    vmovdqa %ymm1, 32(%rcx)
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    retq
 ;
@@ -4401,15 +4403,16 @@ define void @vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4(ptr %in.
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX512F-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,21,22,0]
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm0
-; AVX512F-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
-; AVX512F-NEXT:    vpaddb (%rdx), %ymm2, %ymm1
-; AVX512F-NEXT:    vmovdqa %ymm1, (%rcx)
-; AVX512F-NEXT:    vmovdqa %ymm0, 32(%rcx)
+; AVX512F-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,13,14,0]
+; AVX512F-NEXT:    vpermd %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
+; AVX512F-NEXT:    vpaddb 32(%rdx), %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa %ymm0, (%rcx)
+; AVX512F-NEXT:    vmovdqa %ymm1, 32(%rcx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
@@ -4417,15 +4420,16 @@ define void @vec384_i32_widen_to_i96_factor3_broadcast_to_v4i96_factor4(ptr %in.
 ; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512DQ-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX512DQ-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,21,22,0]
-; AVX512DQ-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm2, %ymm0
-; AVX512DQ-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
-; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm2, %ymm1
-; AVX512DQ-NEXT:    vmovdqa %ymm1, (%rcx)
-; AVX512DQ-NEXT:    vmovdqa %ymm0, 32(%rcx)
+; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,13,14,0]
+; AVX512DQ-NEXT:    vpermd %zmm0, %zmm1, %zmm0
+; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
+; AVX512DQ-NEXT:    vpaddb 32(%rdx), %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
+; AVX512DQ-NEXT:    vmovdqa %ymm0, (%rcx)
+; AVX512DQ-NEXT:    vmovdqa %ymm1, 32(%rcx)
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    retq
 ;
@@ -4536,15 +4540,16 @@ define void @vec384_i32_widen_to_i128_factor4_broadcast_to_v3i128_factor3(ptr %i
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX512F-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,21,22,23]
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm0
-; AVX512F-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
-; AVX512F-NEXT:    vpaddb (%rdx), %ymm2, %ymm1
-; AVX512F-NEXT:    vmovdqa %ymm1, (%rcx)
-; AVX512F-NEXT:    vmovdqa %ymm0, 32(%rcx)
+; AVX512F-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,13,14,15]
+; AVX512F-NEXT:    vpermd %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
+; AVX512F-NEXT:    vpaddb 32(%rdx), %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa %ymm0, (%rcx)
+; AVX512F-NEXT:    vmovdqa %ymm1, 32(%rcx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
@@ -4552,15 +4557,16 @@ define void @vec384_i32_widen_to_i128_factor4_broadcast_to_v3i128_factor3(ptr %i
 ; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512DQ-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX512DQ-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,21,22,23]
-; AVX512DQ-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm2, %ymm0
-; AVX512DQ-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
-; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm2, %ymm1
-; AVX512DQ-NEXT:    vmovdqa %ymm1, (%rcx)
-; AVX512DQ-NEXT:    vmovdqa %ymm0, 32(%rcx)
+; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,13,14,15]
+; AVX512DQ-NEXT:    vpermd %zmm0, %zmm1, %zmm0
+; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
+; AVX512DQ-NEXT:    vpaddb 32(%rdx), %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
+; AVX512DQ-NEXT:    vmovdqa %ymm0, (%rcx)
+; AVX512DQ-NEXT:    vmovdqa %ymm1, 32(%rcx)
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    retq
 ;
@@ -4649,11 +4655,12 @@ define void @vec384_i32_widen_to_i192_factor6_broadcast_to_v2i192_factor2(ptr %i
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX512F-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,21,22,23]
-; AVX512F-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512F-NEXT:    vpaddb (%rdx), %ymm2, %ymm0
+; AVX512F-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,13,14,15]
+; AVX512F-NEXT:    vpermd %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
 ; AVX512F-NEXT:    vmovdqa %ymm0, (%rcx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -4662,11 +4669,12 @@ define void @vec384_i32_widen_to_i192_factor6_broadcast_to_v2i192_factor2(ptr %i
 ; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512DQ-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX512DQ-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,21,22,23]
-; AVX512DQ-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm2, %ymm0
+; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,13,14,15]
+; AVX512DQ-NEXT:    vpermd %zmm0, %zmm1, %zmm0
+; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vmovdqa %ymm0, (%rcx)
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    retq
@@ -4780,15 +4788,16 @@ define void @vec384_i64_widen_to_i128_factor2_broadcast_to_v3i128_factor3(ptr %i
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX512F-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,11,0,0,0,0,0,0,0]
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm2, %ymm0
-; AVX512F-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
-; AVX512F-NEXT:    vpaddb (%rdx), %ymm2, %ymm1
-; AVX512F-NEXT:    vmovdqa %ymm1, (%rcx)
-; AVX512F-NEXT:    vmovdqa %ymm0, 32(%rcx)
+; AVX512F-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0]
+; AVX512F-NEXT:    vpermq %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
+; AVX512F-NEXT:    vpaddb 32(%rdx), %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa %ymm0, (%rcx)
+; AVX512F-NEXT:    vmovdqa %ymm1, 32(%rcx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
@@ -4796,15 +4805,16 @@ define void @vec384_i64_widen_to_i128_factor2_broadcast_to_v3i128_factor3(ptr %i
 ; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512DQ-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX512DQ-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,11,0,0,0,0,0,0,0]
-; AVX512DQ-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
-; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm2, %ymm0
-; AVX512DQ-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
-; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm2, %ymm1
-; AVX512DQ-NEXT:    vmovdqa %ymm1, (%rcx)
-; AVX512DQ-NEXT:    vmovdqa %ymm0, 32(%rcx)
+; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0]
+; AVX512DQ-NEXT:    vpermq %zmm0, %zmm1, %zmm0
+; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
+; AVX512DQ-NEXT:    vpaddb 32(%rdx), %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
+; AVX512DQ-NEXT:    vmovdqa %ymm0, (%rcx)
+; AVX512DQ-NEXT:    vmovdqa %ymm1, 32(%rcx)
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    retq
 ;
@@ -4907,11 +4917,12 @@ define void @vec384_i64_widen_to_i192_factor3_broadcast_to_v2i192_factor2(ptr %i
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX512F-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,11,0,0,0,0,0,0,0]
-; AVX512F-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
-; AVX512F-NEXT:    vpaddb (%rdx), %ymm2, %ymm0
+; AVX512F-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0]
+; AVX512F-NEXT:    vpermq %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
 ; AVX512F-NEXT:    vmovdqa %ymm0, (%rcx)
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
@@ -4920,11 +4931,12 @@ define void @vec384_i64_widen_to_i192_factor3_broadcast_to_v2i192_factor2(ptr %i
 ; AVX512DQ:       # %bb.0:
 ; AVX512DQ-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512DQ-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpaddb 32(%rsi), %ymm1, %ymm1
-; AVX512DQ-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,11,0,0,0,0,0,0,0]
-; AVX512DQ-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
-; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm2, %ymm0
+; AVX512DQ-NEXT:    vpaddb (%rsi), %ymm0, %ymm0
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0]
+; AVX512DQ-NEXT:    vpermq %zmm0, %zmm1, %zmm0
+; AVX512DQ-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vmovdqa %ymm0, (%rcx)
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    retq

diff  --git a/llvm/test/CodeGen/X86/avx-insertelt.ll b/llvm/test/CodeGen/X86/avx-insertelt.ll
index 18ca01290c914..d3ac3f1f64c83 100644
--- a/llvm/test/CodeGen/X86/avx-insertelt.ll
+++ b/llvm/test/CodeGen/X86/avx-insertelt.ll
@@ -270,10 +270,11 @@ define <16 x i16> @insert_i16_firstelts(<16 x i16> %x, i16 %s) {
 ; AVX2-LABEL: insert_i16_firstelts:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpinsrw $0, %edi, %xmm0, %xmm1
-; AVX2-NEXT:    vmovd %edi, %xmm2
-; AVX2-NEXT:    vpbroadcastw %xmm2, %ymm2
-; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4,5,6,7],ymm2[8],ymm0[9,10,11,12,13,14,15]
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    vmovd %edi, %xmm1
+; AVX2-NEXT:    vpbroadcastw %xmm1, %ymm1
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1,2,3,4,5,6,7],ymm1[8],ymm0[9,10,11,12,13,14,15]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX2-NEXT:    retq
   %i0 = insertelement <16 x i16> %x, i16 %s, i32 0
   %i1 = insertelement <16 x i16> %i0, i16 %s, i32 8

diff  --git a/llvm/test/CodeGen/X86/avx-vperm2x128.ll b/llvm/test/CodeGen/X86/avx-vperm2x128.ll
index e51b79fcfd4f7..acfd4e4a73e17 100644
--- a/llvm/test/CodeGen/X86/avx-vperm2x128.ll
+++ b/llvm/test/CodeGen/X86/avx-vperm2x128.ll
@@ -172,7 +172,8 @@ define <8 x i32> @shuffle_v8i32_u5u7cdef(<8 x i32> %a, <8 x i32> %b) nounwind uw
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v8i32_u5u7cdef:

diff  --git a/llvm/test/CodeGen/X86/avx512-insert-extract.ll b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
index 66c5bcfd67044..51e704ba303be 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -690,18 +690,20 @@ define <16 x i16> @insert_v16i16(<16 x i16> %x, i16 %y, ptr %ptr) {
 ; KNL-LABEL: insert_v16i16:
 ; KNL:       ## %bb.0:
 ; KNL-NEXT:    vpinsrw $1, (%rsi), %xmm0, %xmm1
-; KNL-NEXT:    vmovd %edi, %xmm2
-; KNL-NEXT:    vpbroadcastw %xmm2, %ymm2
-; KNL-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6,7,8],ymm2[9],ymm0[10,11,12,13,14,15]
 ; KNL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; KNL-NEXT:    vmovd %edi, %xmm1
+; KNL-NEXT:    vpbroadcastw %xmm1, %ymm1
+; KNL-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7,8],ymm1[9],ymm0[10,11,12,13,14,15]
+; KNL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: insert_v16i16:
 ; SKX:       ## %bb.0:
 ; SKX-NEXT:    vpinsrw $1, (%rsi), %xmm0, %xmm1
-; SKX-NEXT:    vpbroadcastw %edi, %ymm2
-; SKX-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6,7,8],ymm2[9],ymm0[10,11,12,13,14,15]
 ; SKX-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; SKX-NEXT:    vpbroadcastw %edi, %ymm1
+; SKX-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7,8],ymm1[9],ymm0[10,11,12,13,14,15]
+; SKX-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; SKX-NEXT:    retq
   %val = load i16, ptr %ptr
   %r1 = insertelement <16 x i16> %x, i16 %val, i32 1

diff  --git a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
index 6dc634e3daf25..26bdbeb77ccd4 100644
--- a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
+++ b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
@@ -2231,10 +2231,11 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask6(<8 x i64> %vec, <4 x i
 define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask7(<8 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) {
 ; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovdqa {{.*#+}} ymm3 = [2,0,3,4]
-; CHECK-NEXT:    vpermq %zmm0, %zmm3, %zmm0
+; CHECK-NEXT:    vextracti32x4 $2, %zmm0, %xmm3
+; CHECK-NEXT:    vmovdqa {{.*#+}} ymm4 = [2,0,3,4]
+; CHECK-NEXT:    vpermi2q %ymm3, %ymm0, %ymm4
 ; CHECK-NEXT:    vptestnmq %ymm2, %ymm2, %k1
-; CHECK-NEXT:    vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT:    vpblendmq %ymm4, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
   %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 4>
   %cmp = icmp eq <4 x i64> %mask, zeroinitializer
@@ -2244,9 +2245,10 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask7(<8 x i64> %vec, <4 x i64
 define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask7(<8 x i64> %vec, <4 x i64> %mask) {
 ; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovdqa {{.*#+}} ymm2 = [2,0,3,4]
+; CHECK-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
+; CHECK-NEXT:    vmovdqa {{.*#+}} ymm3 = [2,0,3,4]
 ; CHECK-NEXT:    vptestnmq %ymm1, %ymm1, %k1
-; CHECK-NEXT:    vpermq %zmm0, %zmm2, %zmm0 {%k1} {z}
+; CHECK-NEXT:    vpermt2q %ymm2, %ymm3, %ymm0 {%k1} {z}
 ; CHECK-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
 ; CHECK-NEXT:    retq
   %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> <i32 2, i32 0, i32 3, i32 4>
@@ -4101,8 +4103,8 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask5(<8 x double>
 define <4 x double> @test_8xdouble_to_4xdouble_perm_mask6(<8 x double> %vec) {
 ; CHECK-FAST-LABEL: test_8xdouble_to_4xdouble_perm_mask6:
 ; CHECK-FAST:       # %bb.0:
-; CHECK-FAST-NEXT:    vmovaps {{.*#+}} ymm1 = [5,0,7,0]
-; CHECK-FAST-NEXT:    vpermpd %zmm0, %zmm1, %zmm0
+; CHECK-FAST-NEXT:    vmovapd {{.*#+}} ymm1 = [5,8,7,8]
+; CHECK-FAST-NEXT:    vpermt2pd %zmm0, %zmm1, %zmm0
 ; CHECK-FAST-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
 ; CHECK-FAST-NEXT:    retq
 ;
@@ -4118,11 +4120,11 @@ define <4 x double> @test_8xdouble_to_4xdouble_perm_mask6(<8 x double> %vec) {
 define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask6(<8 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
 ; CHECK-FAST-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask6:
 ; CHECK-FAST:       # %bb.0:
-; CHECK-FAST-NEXT:    vmovapd {{.*#+}} ymm3 = [5,0,7,0]
-; CHECK-FAST-NEXT:    vpermpd %zmm0, %zmm3, %zmm0
-; CHECK-FAST-NEXT:    vxorpd %xmm3, %xmm3, %xmm3
-; CHECK-FAST-NEXT:    vcmpeqpd %ymm3, %ymm2, %k1
-; CHECK-FAST-NEXT:    vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-FAST-NEXT:    vmovapd {{.*#+}} ymm3 = [5,8,7,8]
+; CHECK-FAST-NEXT:    vpermi2pd %zmm0, %zmm0, %zmm3
+; CHECK-FAST-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
+; CHECK-FAST-NEXT:    vcmpeqpd %ymm0, %ymm2, %k1
+; CHECK-FAST-NEXT:    vblendmpd %ymm3, %ymm1, %ymm0 {%k1}
 ; CHECK-FAST-NEXT:    retq
 ;
 ; CHECK-FAST-PERLANE-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask6:
@@ -4143,10 +4145,10 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask6(<8 x double> %v
 define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask6(<8 x double> %vec, <4 x double> %mask) {
 ; CHECK-FAST-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask6:
 ; CHECK-FAST:       # %bb.0:
-; CHECK-FAST-NEXT:    vmovapd {{.*#+}} ymm2 = [5,0,7,0]
+; CHECK-FAST-NEXT:    vmovapd {{.*#+}} ymm2 = [5,8,7,8]
 ; CHECK-FAST-NEXT:    vxorpd %xmm3, %xmm3, %xmm3
 ; CHECK-FAST-NEXT:    vcmpeqpd %ymm3, %ymm1, %k1
-; CHECK-FAST-NEXT:    vpermpd %zmm0, %zmm2, %zmm0 {%k1} {z}
+; CHECK-FAST-NEXT:    vpermt2pd %zmm0, %zmm2, %zmm0 {%k1} {z}
 ; CHECK-FAST-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
 ; CHECK-FAST-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/horizontal-sum.ll b/llvm/test/CodeGen/X86/horizontal-sum.ll
index 4dc7fcc687811..c1011a6e326bd 100644
--- a/llvm/test/CodeGen/X86/horizontal-sum.ll
+++ b/llvm/test/CodeGen/X86/horizontal-sum.ll
@@ -233,7 +233,7 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
 ; AVX1-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
 ; AVX1-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX1-SLOW-NEXT:    vhaddps %xmm7, %xmm6, %xmm2
-; AVX1-SLOW-NEXT:    vhaddps %xmm0, %xmm2, %xmm2
+; AVX1-SLOW-NEXT:    vhaddps %xmm2, %xmm2, %xmm2
 ; AVX1-SLOW-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-SLOW-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[2]
 ; AVX1-SLOW-NEXT:    retq
@@ -277,7 +277,7 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
 ; AVX2-SLOW-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
 ; AVX2-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX2-SLOW-NEXT:    vhaddps %xmm7, %xmm6, %xmm2
-; AVX2-SLOW-NEXT:    vhaddps %xmm0, %xmm2, %xmm2
+; AVX2-SLOW-NEXT:    vhaddps %xmm2, %xmm2, %xmm2
 ; AVX2-SLOW-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX2-SLOW-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[2]
 ; AVX2-SLOW-NEXT:    retq

diff  --git a/llvm/test/CodeGen/X86/matrix-multiply.ll b/llvm/test/CodeGen/X86/matrix-multiply.ll
index a607cb7834dc0..376e272a457db 100644
--- a/llvm/test/CodeGen/X86/matrix-multiply.ll
+++ b/llvm/test/CodeGen/X86/matrix-multiply.ll
@@ -1072,20 +1072,21 @@ define <16 x float> @test_mul4x4_f32(<16 x float> %a0, <16 x float> %a1) nounwin
 ; AVX512-NEXT:    vextractf32x4 $2, %zmm1, %xmm3
 ; AVX512-NEXT:    vextractf32x4 $3, %zmm1, %xmm4
 ; AVX512-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX512-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm2
-; AVX512-NEXT:    vinsertf64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512-NEXT:    vpermilps {{.*#+}} zmm4 = zmm2[2,2,2,2,6,6,6,6,10,10,10,10,14,14,14,14]
-; AVX512-NEXT:    vshuff64x2 {{.*#+}} zmm5 = zmm0[4,5,4,5,4,5,4,5]
+; AVX512-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX512-NEXT:    vinsertf64x4 $1, %ymm3, %zmm1, %zmm2
+; AVX512-NEXT:    vpermilps {{.*#+}} zmm4 = zmm2[1,1,1,1,5,5,5,5,9,9,9,9,13,13,13,13]
+; AVX512-NEXT:    vshuff64x2 {{.*#+}} zmm5 = zmm0[2,3,2,3,2,3,2,3]
 ; AVX512-NEXT:    vmulps %zmm4, %zmm5, %zmm4
-; AVX512-NEXT:    vpermilps {{.*#+}} zmm5 = zmm2[1,1,1,1,5,5,5,5,9,9,9,9,13,13,13,13]
-; AVX512-NEXT:    vshuff64x2 {{.*#+}} zmm6 = zmm0[2,3,2,3,2,3,2,3]
-; AVX512-NEXT:    vmulps %zmm5, %zmm6, %zmm5
+; AVX512-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
+; AVX512-NEXT:    vpermilps {{.*#+}} ymm3 = ymm3[0,0,0,0,4,4,4,4]
 ; AVX512-NEXT:    vinsertf64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512-NEXT:    vpermilps {{.*#+}} zmm1 = zmm1[0,0,0,0,4,4,4,4,8,8,8,8,12,12,12,12]
 ; AVX512-NEXT:    vshuff64x2 {{.*#+}} zmm3 = zmm0[0,1,0,1,0,1,0,1]
 ; AVX512-NEXT:    vmulps %zmm1, %zmm3, %zmm1
-; AVX512-NEXT:    vaddps %zmm5, %zmm1, %zmm1
 ; AVX512-NEXT:    vaddps %zmm4, %zmm1, %zmm1
+; AVX512-NEXT:    vpermilps {{.*#+}} zmm3 = zmm2[2,2,2,2,6,6,6,6,10,10,10,10,14,14,14,14]
+; AVX512-NEXT:    vshuff64x2 {{.*#+}} zmm4 = zmm0[4,5,4,5,4,5,4,5]
+; AVX512-NEXT:    vmulps %zmm3, %zmm4, %zmm3
+; AVX512-NEXT:    vaddps %zmm3, %zmm1, %zmm1
 ; AVX512-NEXT:    vpermilps {{.*#+}} zmm2 = zmm2[3,3,3,3,7,7,7,7,11,11,11,11,15,15,15,15]
 ; AVX512-NEXT:    vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,6,7,6,7,6,7]
 ; AVX512-NEXT:    vmulps %zmm2, %zmm0, %zmm0

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
index 806ea82194725..67370c65bc603 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
@@ -638,91 +638,48 @@ define void @load_i16_stride3_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-SLOW-LABEL: load_i16_stride3_vf16:
-; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm3
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm1, %ymm2, %ymm3
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
-; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm5[0,1],xmm4[2],xmm5[3,4],xmm4[5],xmm5[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,2,3,8,9,14,15,4,5,10,11]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm3[0,1,2],ymm6[3,4,5,6,7],ymm3[8,9,10],ymm6[11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,4,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm2, %ymm1, %ymm6
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,4,5,10,11,0,1,6,7,12,13]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm6[0,1,2],ymm7[3,4,5,6,7],ymm6[8,9,10],ymm7[11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,6,7,4]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm2, %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0],xmm4[1],xmm5[2,3],xmm4[4],xmm5[5,6],xmm4[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,0,1,6,7,12,13,2,3,8,9,14,15]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm3, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm6, (%rdx)
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, (%rcx)
-; AVX512F-SLOW-NEXT:    vzeroupper
-; AVX512F-SLOW-NEXT:    retq
-;
-; AVX512F-FAST-LABEL: load_i16_stride3_vf16:
-; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm3
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm1, %ymm2, %ymm3
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
-; AVX512F-FAST-NEXT:    vmovdqa 80(%rdi), %xmm4
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm5
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm5[0,1],xmm4[2],xmm5[3,4],xmm4[5],xmm5[6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,2,3,8,9,14,15,4,5,10,11]
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm3[0,1,2],ymm6[3,4,5,6,7],ymm3[8,9,10],ymm6[11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,4,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm2, %ymm1, %ymm6
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,4,5,10,11,0,1,6,7,12,13]
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm6[0,1,2],ymm7[3,4,5,6,7],ymm6[8,9,10],ymm7[11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,6,7,4]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm2, %ymm1, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0],xmm4[1],xmm5[2,3],xmm4[4],xmm5[5,6],xmm4[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,0,1,6,7,12,13,2,3,8,9,14,15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,9,10,11]
-; AVX512F-FAST-NEXT:    vpermi2d %ymm1, %ymm0, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa %ymm3, (%rsi)
-; AVX512F-FAST-NEXT:    vmovdqa %ymm6, (%rdx)
-; AVX512F-FAST-NEXT:    vmovdqa %ymm2, (%rcx)
-; AVX512F-FAST-NEXT:    vzeroupper
-; AVX512F-FAST-NEXT:    retq
+; AVX512F-LABEL: load_i16_stride3_vf16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm3
+; AVX512F-NEXT:    vpternlogq $202, %ymm1, %ymm2, %ymm3
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
+; AVX512F-NEXT:    vmovdqa 80(%rdi), %xmm4
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm5
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm6 = xmm5[0,1],xmm4[2],xmm5[3,4],xmm4[5],xmm5[6,7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,2,3,8,9,14,15,4,5,10,11]
+; AVX512F-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm6 = ymm3[0,1,2],ymm6[3,4,5,6,7],ymm3[8,9,10],ymm6[11,12,13,14,15]
+; AVX512F-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,4,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512F-NEXT:    vpternlogq $202, %ymm2, %ymm1, %ymm6
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm7 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,4,5,10,11,0,1,6,7,12,13]
+; AVX512F-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm7 = ymm6[0,1,2],ymm7[3,4,5,6,7],ymm6[8,9,10],ymm7[11,12,13,14,15]
+; AVX512F-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,6,7,4]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-NEXT:    vpternlogq $202, %ymm2, %ymm1, %ymm0
+; AVX512F-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0],xmm4[1],xmm5[2,3],xmm4[4],xmm5[5,6],xmm4[7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,0,1,6,7,12,13,2,3,8,9,14,15]
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
+; AVX512F-NEXT:    vmovdqa %ymm3, (%rsi)
+; AVX512F-NEXT:    vmovdqa %ymm6, (%rdx)
+; AVX512F-NEXT:    vmovdqa %ymm0, (%rcx)
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_i16_stride3_vf16:
 ; AVX512BW:       # %bb.0:
@@ -1103,7 +1060,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm5
 ; AVX2-ONLY-NEXT:    vmovdqa 160(%rdi), %xmm6
 ; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm6[0,1],xmm5[2],xmm6[3,4],xmm5[5],xmm6[6,7]
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,u,u,2,3,8,9,14,15,4,5,10,11>
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm10 = [4,5,14,15,0,1,2,3,8,9,14,15,4,5,10,11]
 ; AVX2-ONLY-NEXT:    vpshufb %xmm10, %xmm9, %xmm9
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
 ; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm9 = ymm3[0,1,2],ymm9[3,4,5,6,7],ymm3[8,9,10],ymm9[11,12,13,14,15]
@@ -1128,7 +1085,7 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm12 = [2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
 ; AVX2-ONLY-NEXT:    vpshufb %ymm12, %ymm10, %ymm10
 ; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} xmm13 = xmm5[0,1],xmm6[2],xmm5[3,4],xmm6[5],xmm5[6,7]
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,u,u,u,4,5,10,11,0,1,6,7,12,13>
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm14 = [4,5,4,5,4,5,4,5,10,11,0,1,6,7,12,13]
 ; AVX2-ONLY-NEXT:    vpshufb %xmm14, %xmm13, %xmm13
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
 ; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm13 = ymm10[0,1,2],ymm13[3,4,5,6,7],ymm10[8,9,10],ymm13[11,12,13,14,15]
@@ -1172,163 +1129,84 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-SLOW-LABEL: load_i16_stride3_vf32:
-; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm6
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm5, %ymm6, %ymm1
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
-; AVX512F-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[0,1,6,7,12,13,2,3,8,9,14,15,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm4[0,1,2],ymm3[3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm8
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm9
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm3
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm9, %ymm8, %ymm3
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm3[0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
-; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm4[0,1],xmm3[2],xmm4[3,4],xmm3[5],xmm4[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,2,3,8,9,14,15,4,5,10,11]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm10[0,1,2],ymm11[3,4,5,6,7],ymm10[8,9,10],ymm11[11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,5,4,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm10, %zmm7
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm10
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm6, %ymm5, %ymm10
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm10[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7,8,9],ymm11[10],ymm10[11,12],ymm11[13],ymm10[14,15]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[2,3,8,9,14,15,4,5,10,11,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3,4],xmm10[5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm11, %ymm12
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm9, %ymm12
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm12[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm13[2],ymm12[3,4],ymm13[5],ymm12[6,7,8,9],ymm13[10],ymm12[11,12],ymm13[13],ymm12[14,15]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm3[0,1],xmm4[2],xmm3[3,4],xmm4[5],xmm3[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,u,u,u,u,4,5,10,11,0,1,6,7,12,13]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm13 = ymm12[0,1,2],ymm13[3,4,5,6,7],ymm12[8,9,10],ymm13[11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,5,6,7,4]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm12, %zmm10
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm5, %ymm6, %ymm11
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm11[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm11[1,2],ymm5[3],ymm11[4,5],ymm5[6],ymm11[7],ymm5[8],ymm11[9,10],ymm5[11],ymm11[12,13],ymm5[14],ymm11[15]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm5, %ymm5
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm9, %ymm0
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm0[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0],ymm0[1,2],ymm8[3],ymm0[4,5],ymm8[6],ymm0[7],ymm8[8],ymm0[9,10],ymm8[11],ymm0[12,13],ymm8[14],ymm0[15]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6],xmm3[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,0,1,6,7,12,13,2,3,8,9,14,15]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
-; AVX512F-SLOW-NEXT:    vextracti32x4 $2, %zmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm7, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm10, (%rdx)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
-; AVX512F-SLOW-NEXT:    vzeroupper
-; AVX512F-SLOW-NEXT:    retq
-;
-; AVX512F-FAST-LABEL: load_i16_stride3_vf32:
-; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-FAST-NEXT:    vmovdqa 128(%rdi), %ymm5
-; AVX512F-FAST-NEXT:    vmovdqa 160(%rdi), %ymm6
-; AVX512F-FAST-NEXT:    vmovdqa %ymm2, %ymm0
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm5, %ymm6, %ymm0
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14],ymm1[15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
-; AVX512F-FAST-NEXT:    vmovdqa 112(%rdi), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %xmm1
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm1[0],xmm0[1],xmm1[2,3],xmm0[4],xmm1[5,6],xmm0[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[0,1,6,7,12,13,2,3,8,9,14,15,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm4[0,1,2],ymm3[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm8
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm9
-; AVX512F-FAST-NEXT:    vmovdqa %ymm2, %ymm3
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm9, %ymm8, %ymm3
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm3[0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
-; AVX512F-FAST-NEXT:    vmovdqa 80(%rdi), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm4
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm4[0,1],xmm3[2],xmm4[3,4],xmm3[5],xmm4[6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,2,3,8,9,14,15,4,5,10,11]
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm10[0,1,2],ymm11[3,4,5,6,7],ymm10[8,9,10],ymm11[11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,5,4,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm10, %zmm7
-; AVX512F-FAST-NEXT:    vmovdqa %ymm2, %ymm10
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm6, %ymm5, %ymm10
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm10[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7,8,9],ymm11[10],ymm10[11,12],ymm11[13],ymm10[14,15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[2,3,8,9,14,15,4,5,10,11,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3,4],xmm10[5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm11, %ymm12
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm8, %ymm9, %ymm12
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm12[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm13[2],ymm12[3,4],ymm13[5],ymm12[6,7,8,9],ymm13[10],ymm12[11,12],ymm13[13],ymm12[14,15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm3[0,1],xmm4[2],xmm3[3,4],xmm4[5],xmm3[6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,u,u,u,u,4,5,10,11,0,1,6,7,12,13]
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm12[0,1,2],ymm13[3,4,5,6,7],ymm12[8,9,10],ymm13[11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,5,6,7,4]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm12, %zmm10
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm5, %ymm6, %ymm11
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm11[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm11[1,2],ymm5[3],ymm11[4,5],ymm5[6],ymm11[7],ymm5[8],ymm11[9,10],ymm5[11],ymm11[12,13],ymm5[14],ymm11[15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm5, %ymm5
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm8, %ymm9, %ymm2
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm2[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm8[0],ymm2[1,2],ymm8[3],ymm2[4,5],ymm8[6],ymm2[7],ymm8[8],ymm2[9,10],ymm8[11],ymm2[12,13],ymm8[14],ymm2[15]
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6],xmm3[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,0,1,6,7,12,13,2,3,8,9,14,15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,9,10,11]
-; AVX512F-FAST-NEXT:    vpermi2d %ymm3, %ymm2, %ymm4
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4],xmm1[5],xmm0[6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm0
-; AVX512F-FAST-NEXT:    vextracti32x4 $2, %zmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, (%rsi)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, (%rdx)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
-; AVX512F-FAST-NEXT:    vzeroupper
-; AVX512F-FAST-NEXT:    retq
+; AVX512F-LABEL: load_i16_stride3_vf32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm5
+; AVX512F-NEXT:    vmovdqa 160(%rdi), %ymm6
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm1
+; AVX512F-NEXT:    vpternlogq $202, %ymm5, %ymm6, %ymm1
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm3 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
+; AVX512F-NEXT:    vmovdqa 112(%rdi), %xmm1
+; AVX512F-NEXT:    vmovdqa 96(%rdi), %xmm2
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm4 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[0,1,6,7,12,13,2,3,8,9,14,15,u,u,u,u]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm7 = ymm4[0,1,2],ymm3[3,4,5,6,7]
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm8
+; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm9
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm3
+; AVX512F-NEXT:    vpternlogq $202, %ymm9, %ymm8, %ymm3
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm10 = ymm3[0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
+; AVX512F-NEXT:    vmovdqa 80(%rdi), %xmm3
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm11 = xmm4[0,1],xmm3[2],xmm4[3,4],xmm3[5],xmm4[6,7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,2,3,8,9,14,15,4,5,10,11]
+; AVX512F-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm11 = ymm10[0,1,2],ymm11[3,4,5,6,7],ymm10[8,9,10],ymm11[11,12,13,14,15]
+; AVX512F-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,5,4,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm7, %zmm10, %zmm7
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm10
+; AVX512F-NEXT:    vpternlogq $202, %ymm6, %ymm5, %ymm10
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm11 = ymm10[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7,8,9],ymm11[10],ymm10[11,12],ymm11[13],ymm10[14,15]
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm11 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[2,3,8,9,14,15,4,5,10,11,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3,4],xmm10[5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512F-NEXT:    vmovdqa %ymm11, %ymm12
+; AVX512F-NEXT:    vpternlogq $202, %ymm8, %ymm9, %ymm12
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm13 = ymm12[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm13[2],ymm12[3,4],ymm13[5],ymm12[6,7,8,9],ymm13[10],ymm12[11,12],ymm13[13],ymm12[14,15]
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm13 = xmm3[0,1],xmm4[2],xmm3[3,4],xmm4[5],xmm3[6,7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,u,u,u,u,4,5,10,11,0,1,6,7,12,13]
+; AVX512F-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm13 = ymm12[0,1,2],ymm13[3,4,5,6,7],ymm12[8,9,10],ymm13[11,12,13,14,15]
+; AVX512F-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,5,6,7,4]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm12, %zmm10
+; AVX512F-NEXT:    vpternlogq $202, %ymm5, %ymm6, %ymm11
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm5 = ymm11[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm11[1,2],ymm5[3],ymm11[4,5],ymm5[6],ymm11[7],ymm5[8],ymm11[9,10],ymm5[11],ymm11[12,13],ymm5[14],ymm11[15]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm5, %ymm5
+; AVX512F-NEXT:    vpternlogq $202, %ymm8, %ymm9, %ymm0
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm8 = ymm0[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0],ymm0[1,2],ymm8[3],ymm0[4,5],ymm8[6],ymm0[7],ymm8[8],ymm0[9,10],ymm8[11],ymm0[12,13],ymm8[14],ymm0[15]
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,0,1,6,7,12,13,2,3,8,9,14,15]
+; AVX512F-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5,6,7]
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512F-NEXT:    vextracti32x4 $2, %zmm1, %xmm1
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm7, (%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm10, (%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm0, (%rcx)
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_i16_stride3_vf32:
 ; AVX512BW:       # %bb.0:
@@ -2131,7 +2009,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpblendvb %ymm4, %ymm2, %ymm1, %ymm12
 ; AVX2-ONLY-NEXT:    vmovdqa 160(%rdi), %xmm14
 ; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm14[0,1],xmm7[2],xmm14[3,4],xmm7[5],xmm14[6,7]
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,2,3,8,9,14,15,4,5,10,11>
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm4 = [4,5,14,15,0,1,2,3,8,9,14,15,4,5,10,11]
 ; AVX2-ONLY-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
@@ -2181,7 +2059,7 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} ymm13 = [2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
 ; AVX2-ONLY-NEXT:    vpshufb %ymm13, %ymm4, %ymm4
 ; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0,1],xmm14[2],xmm7[3,4],xmm14[5],xmm7[6,7]
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,4,5,10,11,0,1,6,7,12,13>
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm5 = [4,5,4,5,4,5,4,5,10,11,0,1,6,7,12,13]
 ; AVX2-ONLY-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX2-ONLY-NEXT:    vpblendw {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3,4,5,6,7],ymm4[8,9,10],ymm0[11,12,13,14,15]
@@ -2268,348 +2146,177 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-SLOW-LABEL: load_i16_stride3_vf64:
-; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-SLOW-NEXT:    vmovdqa64 224(%rdi), %ymm20
-; AVX512F-SLOW-NEXT:    vmovdqa64 192(%rdi), %ymm21
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm1
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7],ymm1[8],ymm3[9],ymm1[10,11],ymm3[12],ymm1[13,14],ymm3[15]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm1, %ymm6
-; AVX512F-SLOW-NEXT:    vmovdqa 272(%rdi), %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa 256(%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm2[0,1],xmm8[2],xmm2[3,4],xmm8[5],xmm2[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, %xmm14
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,2,3,8,9,14,15,4,5,10,11>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm6[0,1,2],ymm7[3,4,5,6,7],ymm6[8,9,10],ymm7[11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,4,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm6[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 320(%rdi), %ymm22
-; AVX512F-SLOW-NEXT:    vmovdqa64 352(%rdi), %ymm23
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm6
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm22, %ymm23, %ymm6
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3],ymm7[4],ymm6[5,6],ymm7[7],ymm6[8],ymm7[9],ymm6[10,11],ymm7[12],ymm6[13,14],ymm7[15]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm6, %ymm12
-; AVX512F-SLOW-NEXT:    vmovdqa 304(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa 288(%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm1, %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = [0,1,6,7,12,13,2,3,8,9,14,15,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm10, %zmm18
-; AVX512F-SLOW-NEXT:    vmovdqa64 128(%rdi), %ymm24
-; AVX512F-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm13
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm10
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm24, %ymm13, %ymm10
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm10[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0],ymm12[1],ymm10[2,3],ymm12[4],ymm10[5,6],ymm12[7],ymm10[8],ymm12[9],ymm10[10,11],ymm12[12],ymm10[13,14],ymm12[15]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm10, %ymm10
-; AVX512F-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm11
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm12
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm12[0],xmm11[1],xmm12[2,3],xmm11[4],xmm12[5,6],xmm11[7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2],ymm10[3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm16
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm15
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm5
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm15, %ymm16, %ymm5
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm5[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm10[1],ymm5[2,3],ymm10[4],ymm5[5,6],ymm10[7],ymm5[8],ymm10[9],ymm5[10,11],ymm10[12],ymm5[13,14],ymm10[15]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm5, %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm10
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm10[0,1],xmm5[2],xmm10[3,4],xmm5[5],xmm10[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,4,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm19
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm23, %ymm22, %ymm1
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm28
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm6[2],xmm4[3,4],xmm6[5],xmm4[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm25
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm26
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = [2,3,8,9,14,15,4,5,10,11,10,11,10,11,10,11]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm1[5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm9, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm1
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm1[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7,8,9],ymm4[10],ymm1[11,12],ymm4[13],ymm1[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm14, %xmm7
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm8[0,1],xmm14[2],xmm8[3,4],xmm14[5],xmm8[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm8, %xmm27
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,4,5,10,11,0,1,6,7,12,13>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm1[0,1,2],ymm14[3,4,5,6,7],ymm1[8,9,10],ymm14[11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,4]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm17
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm13, %ymm24, %ymm1
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7,8,9],ymm3[10],ymm1[11,12],ymm3[13],ymm1[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm3
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm12[0,1],xmm11[2],xmm12[3,4],xmm11[5],xmm12[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm1[5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm9, %ymm3
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm15, %ymm3
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm3[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7,8,9],ymm6[10],ymm3[11,12],ymm6[13],ymm3[14,15]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1],xmm10[2],xmm5[3,4],xmm10[5],xmm5[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,6,7,4]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm24, %ymm9, %ymm13
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm13[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm13[1,2],ymm2[3],ymm13[4,5],ymm2[6],ymm13[7],ymm2[8],ymm13[9,10],ymm2[11],ymm13[12,13],ymm2[14],ymm13[15]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm16, %ymm0, %ymm15
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm15[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm15[1,2],ymm3[3],ymm15[4,5],ymm3[6],ymm15[7],ymm3[8],ymm15[9,10],ymm3[11],ymm15[12,13],ymm3[14],ymm15[15]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm10[0],xmm5[1],xmm10[2,3],xmm5[4],xmm10[5,6],xmm5[7]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,6,7,12,13,2,3,8,9,14,15]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm5[5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm2, %ymm2
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm11[0,1],xmm12[2],xmm11[3,4],xmm12[5],xmm11[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = [4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm5
-; AVX512F-SLOW-NEXT:    vextracti32x4 $2, %zmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm2[5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm22, %ymm23, %ymm9
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm9[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm9[1,2],ymm3[3],ymm9[4,5],ymm3[6],ymm9[7],ymm3[8],ymm9[9,10],ymm3[11],ymm9[12,13],ymm3[14],ymm9[15]
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm0
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0],ymm0[1,2],ymm5[3],ymm0[4,5],ymm5[6],ymm0[7],ymm5[8],ymm0[9,10],ymm5[11],ymm0[12,13],ymm5[14],ymm0[15]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm4
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2,3],xmm4[4],xmm7[5,6],xmm4[7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm5
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
-; AVX512F-SLOW-NEXT:    vextracti32x4 $2, %zmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm19, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm18, 64(%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm17, 64(%rdx)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm1, (%rdx)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, 64(%rcx)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, (%rcx)
-; AVX512F-SLOW-NEXT:    vzeroupper
-; AVX512F-SLOW-NEXT:    retq
-;
-; AVX512F-FAST-LABEL: load_i16_stride3_vf64:
-; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-FAST-NEXT:    vmovdqa64 224(%rdi), %ymm20
-; AVX512F-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm21
-; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7],ymm1[8],ymm3[9],ymm1[10,11],ymm3[12],ymm1[13,14],ymm3[15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm5
-; AVX512F-FAST-NEXT:    vmovdqa 272(%rdi), %xmm8
-; AVX512F-FAST-NEXT:    vmovdqa 256(%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm2[0,1],xmm8[2],xmm2[3,4],xmm8[5],xmm2[6,7]
-; AVX512F-FAST-NEXT:    vmovdqa %xmm2, %xmm14
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,2,3,8,9,14,15,4,5,10,11>
-; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm6
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm5[0,1,2],ymm6[3,4,5,6,7],ymm5[8,9,10],ymm6[11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,4,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa64 320(%rdi), %ymm22
-; AVX512F-FAST-NEXT:    vmovdqa64 352(%rdi), %ymm23
-; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm6
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm22, %ymm23, %ymm6
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3],ymm7[4],ymm6[5,6],ymm7[7],ymm6[8],ymm7[9],ymm6[10,11],ymm7[12],ymm6[13,14],ymm7[15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm6, %ymm12
-; AVX512F-FAST-NEXT:    vmovdqa 304(%rdi), %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa 288(%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
-; AVX512F-FAST-NEXT:    vmovdqa %xmm2, %xmm4
-; AVX512F-FAST-NEXT:    vmovdqa %xmm1, %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [0,1,6,7,12,13,2,3,8,9,14,15,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm5, %zmm16
-; AVX512F-FAST-NEXT:    vmovdqa64 128(%rdi), %ymm24
-; AVX512F-FAST-NEXT:    vmovdqa 160(%rdi), %ymm13
-; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm5
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm24, %ymm13, %ymm5
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm5[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2,3],ymm12[4],ymm5[5,6],ymm12[7],ymm5[8],ymm12[9],ymm5[10,11],ymm12[12],ymm5[13,14],ymm12[15]
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm5, %ymm5
-; AVX512F-FAST-NEXT:    vmovdqa 112(%rdi), %xmm11
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %xmm12
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm12[0],xmm11[1],xmm12[2,3],xmm11[4],xmm12[5,6],xmm11[7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1,2],ymm5[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa64 (%rdi), %ymm17
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm5
-; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm10
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm5, %ymm17, %ymm10
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm10[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0],ymm15[1],ymm10[2,3],ymm15[4],ymm10[5,6],ymm15[7],ymm10[8],ymm15[9],ymm10[10,11],ymm15[12],ymm10[13,14],ymm15[15]
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm10, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa 80(%rdi), %xmm10
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm15
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm15[0,1],xmm10[2],xmm15[3,4],xmm10[5],xmm15[6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,4,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm18
-; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm23, %ymm22, %ymm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm28
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm6[2],xmm4[3,4],xmm6[5],xmm4[6,7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm6, %xmm25
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm26
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [2,3,8,9,14,15,4,5,10,11,10,11,10,11,10,11]
-; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm1[5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm9, %ymm1
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm1[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7,8,9],ymm4[10],ymm1[11,12],ymm4[13],ymm1[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa %xmm14, %xmm7
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm8[0,1],xmm14[2],xmm8[3,4],xmm14[5],xmm8[6,7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm8, %xmm27
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,4,5,10,11,0,1,6,7,12,13>
-; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm1[0,1,2],ymm14[3,4,5,6,7],ymm1[8,9,10],ymm14[11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,4]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm19
-; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm13, %ymm24, %ymm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7,8,9],ymm3[10],ymm1[11,12],ymm3[13],ymm1[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm28, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm12[0,1],xmm11[2],xmm12[3,4],xmm11[5],xmm12[6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm1[5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm9, %ymm3
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm5, %ymm3
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm3[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7,8,9],ymm6[10],ymm3[11,12],ymm6[13],ymm3[14,15]
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm10[0,1],xmm15[2],xmm10[3,4],xmm15[5],xmm10[6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,6,7,4]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm24, %ymm9, %ymm13
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm13[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm13[1,2],ymm2[3],ymm13[4,5],ymm2[6],ymm13[7],ymm2[8],ymm13[9,10],ymm2[11],ymm13[12,13],ymm2[14],ymm13[15]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm17, %ymm0, %ymm5
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm5[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm5[1,2],ymm3[3],ymm5[4,5],ymm3[6],ymm5[7],ymm3[8],ymm5[9,10],ymm3[11],ymm5[12,13],ymm3[14],ymm5[15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm15[0],xmm10[1],xmm15[2,3],xmm10[4],xmm15[5,6],xmm10[7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,6,7,12,13,2,3,8,9,14,15]
-; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,1,2,3,4,9,10,11]
-; AVX512F-FAST-NEXT:    vpermt2d %ymm5, %ymm8, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm11[0,1],xmm12[2],xmm11[3,4],xmm12[5],xmm11[6,7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
-; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm5
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm5
-; AVX512F-FAST-NEXT:    vextracti32x4 $2, %zmm5, %xmm5
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm2[5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm22, %ymm23, %ymm9
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm9[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm9[1,2],ymm3[3],ymm9[4,5],ymm3[6],ymm9[7],ymm3[8],ymm9[9,10],ymm3[11],ymm9[12,13],ymm3[14],ymm9[15]
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm0
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0],ymm0[1,2],ymm5[3],ymm0[4,5],ymm5[6],ymm0[7],ymm5[8],ymm0[9,10],ymm5[11],ymm0[12,13],ymm5[14],ymm0[15]
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm27, %xmm4
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2,3],xmm4[4],xmm7[5,6],xmm4[7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpermt2d %ymm4, %ymm8, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm25, %xmm4
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm26, %xmm5
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
-; AVX512F-FAST-NEXT:    vextracti32x4 $2, %zmm4, %xmm4
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm18, (%rsi)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm16, 64(%rsi)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm19, 64(%rdx)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, (%rdx)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, 64(%rcx)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, (%rcx)
-; AVX512F-FAST-NEXT:    vzeroupper
-; AVX512F-FAST-NEXT:    retq
+; AVX512F-LABEL: load_i16_stride3_vf64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX512F-NEXT:    vmovdqa64 224(%rdi), %ymm20
+; AVX512F-NEXT:    vmovdqa64 192(%rdi), %ymm21
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm1
+; AVX512F-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm1
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7],ymm1[8],ymm3[9],ymm1[10,11],ymm3[12],ymm1[13,14],ymm3[15]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm1, %ymm6
+; AVX512F-NEXT:    vmovdqa 272(%rdi), %xmm8
+; AVX512F-NEXT:    vmovdqa 256(%rdi), %xmm2
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm7 = xmm2[0,1],xmm8[2],xmm2[3,4],xmm8[5],xmm2[6,7]
+; AVX512F-NEXT:    vmovdqa %xmm2, %xmm14
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm9 = [4,5,14,15,0,1,2,3,8,9,14,15,4,5,10,11]
+; AVX512F-NEXT:    vpshufb %xmm9, %xmm7, %xmm7
+; AVX512F-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm7 = ymm6[0,1,2],ymm7[3,4,5,6,7],ymm6[8,9,10],ymm7[11,12,13,14,15]
+; AVX512F-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,4,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm10 = ymm6[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 320(%rdi), %ymm22
+; AVX512F-NEXT:    vmovdqa64 352(%rdi), %ymm23
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm6
+; AVX512F-NEXT:    vpternlogq $202, %ymm22, %ymm23, %ymm6
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3],ymm7[4],ymm6[5,6],ymm7[7],ymm6[8],ymm7[9],ymm6[10,11],ymm7[12],ymm6[13,14],ymm7[15]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm11 = [0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
+; AVX512F-NEXT:    vpshufb %ymm11, %ymm6, %ymm12
+; AVX512F-NEXT:    vmovdqa 304(%rdi), %xmm1
+; AVX512F-NEXT:    vmovdqa 288(%rdi), %xmm2
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm13 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
+; AVX512F-NEXT:    vmovdqa %xmm2, %xmm4
+; AVX512F-NEXT:    vmovdqa %xmm1, %xmm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm15 = [0,1,6,7,12,13,2,3,8,9,14,15,12,13,14,15]
+; AVX512F-NEXT:    vpshufb %xmm15, %xmm13, %xmm13
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3,4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm12, %zmm10, %zmm18
+; AVX512F-NEXT:    vmovdqa64 128(%rdi), %ymm24
+; AVX512F-NEXT:    vmovdqa 160(%rdi), %ymm13
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm10
+; AVX512F-NEXT:    vpternlogq $202, %ymm24, %ymm13, %ymm10
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm12 = ymm10[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0],ymm12[1],ymm10[2,3],ymm12[4],ymm10[5,6],ymm12[7],ymm10[8],ymm12[9],ymm10[10,11],ymm12[12],ymm10[13,14],ymm12[15]
+; AVX512F-NEXT:    vpshufb %ymm11, %ymm10, %ymm10
+; AVX512F-NEXT:    vmovdqa 112(%rdi), %xmm11
+; AVX512F-NEXT:    vmovdqa 96(%rdi), %xmm12
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm5 = xmm12[0],xmm11[1],xmm12[2,3],xmm11[4],xmm12[5,6],xmm11[7]
+; AVX512F-NEXT:    vpshufb %xmm15, %xmm5, %xmm5
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2],ymm10[3,4,5,6,7]
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %ymm16
+; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm15
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm5
+; AVX512F-NEXT:    vpternlogq $202, %ymm15, %ymm16, %ymm5
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm10 = ymm5[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm10[1],ymm5[2,3],ymm10[4],ymm5[5,6],ymm10[7],ymm5[8],ymm10[9],ymm5[10,11],ymm10[12],ymm5[13,14],ymm10[15]
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm5, %ymm2
+; AVX512F-NEXT:    vmovdqa 80(%rdi), %xmm5
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm10
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm3 = xmm10[0,1],xmm5[2],xmm10[3,4],xmm5[5],xmm10[6,7]
+; AVX512F-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
+; AVX512F-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
+; AVX512F-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,4,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm19
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm1
+; AVX512F-NEXT:    vpternlogq $202, %ymm23, %ymm22, %ymm1
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
+; AVX512F-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vmovdqa64 %ymm2, %ymm28
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm6[2],xmm4[3,4],xmm6[5],xmm4[6,7]
+; AVX512F-NEXT:    vmovdqa64 %xmm6, %xmm25
+; AVX512F-NEXT:    vmovdqa64 %xmm4, %xmm26
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = [2,3,8,9,14,15,4,5,10,11,10,11,10,11,10,11]
+; AVX512F-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm1[5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512F-NEXT:    vmovdqa %ymm9, %ymm1
+; AVX512F-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm1
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm4 = ymm1[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7,8,9],ymm4[10],ymm1[11,12],ymm4[13],ymm1[14,15]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
+; AVX512F-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vmovdqa %xmm14, %xmm7
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm14 = xmm8[0,1],xmm14[2],xmm8[3,4],xmm14[5],xmm8[6,7]
+; AVX512F-NEXT:    vmovdqa64 %xmm8, %xmm27
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [4,5,4,5,4,5,4,5,10,11,0,1,6,7,12,13]
+; AVX512F-NEXT:    vpshufb %xmm2, %xmm14, %xmm14
+; AVX512F-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm14 = ymm1[0,1,2],ymm14[3,4,5,6,7],ymm1[8,9,10],ymm14[11,12,13,14,15]
+; AVX512F-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,4]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm17
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm1
+; AVX512F-NEXT:    vpternlogq $202, %ymm13, %ymm24, %ymm1
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7,8,9],ymm3[10],ymm1[11,12],ymm3[13],ymm1[14,15]
+; AVX512F-NEXT:    vmovdqa64 %ymm28, %ymm3
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm3 = xmm12[0,1],xmm11[2],xmm12[3,4],xmm11[5],xmm12[6,7]
+; AVX512F-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm1[5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT:    vmovdqa %ymm9, %ymm3
+; AVX512F-NEXT:    vpternlogq $202, %ymm16, %ymm15, %ymm3
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm6 = ymm3[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7,8,9],ymm6[10],ymm3[11,12],ymm6[13],ymm3[14,15]
+; AVX512F-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1],xmm10[2],xmm5[3,4],xmm10[5],xmm5[6,7]
+; AVX512F-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
+; AVX512F-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
+; AVX512F-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,6,7,4]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512F-NEXT:    vpternlogq $226, %ymm24, %ymm9, %ymm13
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm2 = ymm13[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm13[1,2],ymm2[3],ymm13[4,5],ymm2[6],ymm13[7],ymm2[8],ymm13[9,10],ymm2[11],ymm13[12,13],ymm2[14],ymm13[15]
+; AVX512F-NEXT:    vpternlogq $226, %ymm16, %ymm0, %ymm15
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm3 = ymm15[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm15[1,2],ymm3[3],ymm15[4,5],ymm3[6],ymm15[7],ymm3[8],ymm15[9,10],ymm3[11],ymm15[12,13],ymm3[14],ymm15[15]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
+; AVX512F-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm5 = xmm10[0],xmm5[1],xmm10[2,3],xmm5[4],xmm10[5,6],xmm5[7]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,6,7,12,13,2,3,8,9,14,15]
+; AVX512F-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
+; AVX512F-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm5[5,6,7]
+; AVX512F-NEXT:    vpshufb %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm5 = xmm11[0,1],xmm12[2],xmm11[3,4],xmm12[5],xmm11[6,7]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm8 = [4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
+; AVX512F-NEXT:    vpshufb %xmm8, %xmm5, %xmm5
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm5
+; AVX512F-NEXT:    vextracti32x4 $2, %zmm5, %xmm5
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm2[5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512F-NEXT:    vpternlogq $202, %ymm22, %ymm23, %ymm9
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm3 = ymm9[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm9[1,2],ymm3[3],ymm9[4,5],ymm3[6],ymm9[7],ymm3[8],ymm9[9,10],ymm3[11],ymm9[12,13],ymm3[14],ymm9[15]
+; AVX512F-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm0
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
+; AVX512F-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0],ymm0[1,2],ymm5[3],ymm0[4,5],ymm5[6],ymm0[7],ymm5[8],ymm0[9,10],ymm5[11],ymm0[12,13],ymm5[14],ymm0[15]
+; AVX512F-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa64 %xmm27, %xmm4
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2,3],xmm4[4],xmm7[5,6],xmm4[7]
+; AVX512F-NEXT:    vpshufb %xmm6, %xmm4, %xmm4
+; AVX512F-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
+; AVX512F-NEXT:    vmovdqa64 %xmm25, %xmm4
+; AVX512F-NEXT:    vmovdqa64 %xmm26, %xmm5
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7]
+; AVX512F-NEXT:    vpshufb %xmm8, %xmm4, %xmm4
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
+; AVX512F-NEXT:    vextracti32x4 $2, %zmm4, %xmm4
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm19, (%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm18, 64(%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm17, 64(%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm1, (%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm0, 64(%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm2, (%rcx)
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: load_i16_stride3_vf64:
 ; AVX512BW:       # %bb.0:

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
index 7190c146aae05..2c8d6573cdd14 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-4.ll
@@ -644,42 +644,43 @@ define void @load_i16_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-LABEL: load_i16_stride4_vf16:
 ; AVX1-ONLY:       # %bb.0:
 ; AVX1-ONLY-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-ONLY-NEXT:    vmovdqa 112(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0],xmm1[1,2,3],xmm4[4],xmm1[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0],xmm1[1,2,3],xmm5[4],xmm1[5,6,7]
-; AVX1-ONLY-NEXT:    vpackusdw %xmm0, %xmm2, %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa 80(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vmovdqa 112(%rdi), %xmm5
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm5[0],xmm1[1,2,3],xmm5[4],xmm1[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm6
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm6[0],xmm1[1,2,3],xmm6[4],xmm1[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm1[1,2,3],xmm7[4],xmm1[5,6,7]
+; AVX1-ONLY-NEXT:    vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa 80(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm7[0],xmm1[1,2,3],xmm7[4],xmm1[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm8[0],xmm1[1,2,3],xmm8[4],xmm1[5,6,7]
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm2, %xmm3, %xmm2
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm0, %xmm2, %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa (%rdi), %xmm2
 ; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovdqa 32(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vmovdqa 32(%rdi), %xmm4
 ; AVX1-ONLY-NEXT:    vmovdqa 48(%rdi), %xmm9
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm9[0],xmm1[1,2,3],xmm9[4],xmm1[5,6,7]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm8[0],xmm1[1,2,3],xmm8[4],xmm1[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm4[0],xmm1[1,2,3],xmm4[4],xmm1[5,6,7]
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm10, %xmm11, %xmm10
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm3[0],xmm1[1,2,3],xmm3[4],xmm1[5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3],xmm2[4],xmm1[5,6,7]
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm11, %xmm1, %xmm1
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm10, %xmm1, %xmm1
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm4[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm5[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm5[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm6[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[0,1,1,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm6[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm7[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[1,3,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm12 = xmm7[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm12 = xmm8[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[1,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3],xmm10[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm10
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm9[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm12 = xmm8[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm12 = xmm4[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[0,1,1,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm12 = xmm3[0,2,2,3]
@@ -688,22 +689,23 @@ define void @load_i16_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[1,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm12[0,1,2,3],xmm11[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm11, %ymm10
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[3,1,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm4[0,1,2,0,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[3,1,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm5[0,1,2,0,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm5[0,1,2,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[3,1,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm6[2,0,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm6[0,1,2,0,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[3,1,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm7[2,0,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm7[2,0,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm8[2,0,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm12[0,1,2,3],xmm11[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm11
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[3,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm9[0,1,2,0,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[3,1,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm8[0,1,2,0,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm4[0,1,2,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm3[2,0,2,3,4,5,6,7]
@@ -711,22 +713,23 @@ define void @load_i16_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm2[2,0,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm12 = xmm13[0,1,2,3],xmm12[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm12, %ymm11
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,3,1,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,3,1,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm6[3,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[3,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm9[0,1,3,1,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm8[0,1,3,1,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,1,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[3,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm8[3,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm9[0,1,3,1,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,3,1,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, (%rsi)
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, 16(%rsi)
 ; AVX1-ONLY-NEXT:    vmovaps %ymm10, (%rdx)
@@ -1017,62 +1020,67 @@ define void @load_i16_stride4_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-SLOW:       # %bb.0:
 ; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm0
 ; AVX512F-SLOW-NEXT:    vpmovqw %ymm0, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm1[0,1,0,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = mem[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpmovqw %ymm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,5,0,5]
-; AVX512F-SLOW-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm3, %ymm4, %ymm0
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm1
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm2[0,1,0,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm4
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm5[0,1,0,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm3
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm3, %xmm5
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm7
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm7[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm3, %xmm6
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm7
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm7[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[1,3,2,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm6[0,2,2,3]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[1,3,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm5[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[1,3,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm1, %ymm4, %ymm8
-; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm3, %zmm1
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm2[0,1,2,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm8[0],xmm5[0],xmm8[1],xmm5[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3,4,5],ymm2[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm3, %zmm5
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm5, %xmm5
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm1[0,1,2,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm4[0,1,2,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm8[0],xmm5[0],xmm8[1],xmm5[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm7[0,1,2,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm7[2,0,2,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[3,1,2,3]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm6[2,0,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm5[2,0,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm8, %ymm4, %ymm9
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3,4,5],ymm5[6,7]
 ; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm3, %zmm8
 ; AVX512F-SLOW-NEXT:    vpmovqw %zmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,3,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,3,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm7[3,1,2,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[3,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[3,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm2, %ymm4, %ymm5
-; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm3, %zmm2
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm3, %zmm3
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm3, %xmm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, (%rdx)
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm8, (%rcx)
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm2, (%r8)
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm2, (%rdx)
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm5, (%rcx)
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, (%r8)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
@@ -1403,189 +1411,186 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX1-ONLY-LABEL: load_i16_stride4_vf32:
 ; AVX1-ONLY:       # %bb.0:
-; AVX1-ONLY-NEXT:    subq $296, %rsp # imm = 0x128
-; AVX1-ONLY-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa 112(%rdi), %xmm15
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm15[0],xmm0[1,2,3],xmm15[4],xmm0[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm0[1,2,3],xmm2[4],xmm0[5,6,7]
-; AVX1-ONLY-NEXT:    vpackusdw %xmm1, %xmm2, %xmm3
-; AVX1-ONLY-NEXT:    vmovdqa 80(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm1[0],xmm0[1,2,3],xmm1[4],xmm0[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm6 = xmm1[0],xmm0[1,2,3],xmm1[4],xmm0[5,6,7]
-; AVX1-ONLY-NEXT:    vpackusdw %xmm5, %xmm6, %xmm5
-; AVX1-ONLY-NEXT:    vpackusdw %xmm3, %xmm5, %xmm3
+; AVX1-ONLY-NEXT:    subq $232, %rsp
+; AVX1-ONLY-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; AVX1-ONLY-NEXT:    vmovdqa 112(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0],xmm5[1,2,3],xmm3[4],xmm5[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa (%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vmovdqa 48(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm2[0],xmm0[1,2,3],xmm2[4],xmm0[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm7 = xmm4[0],xmm0[1,2,3],xmm4[4],xmm0[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm11[0],xmm5[1,2,3],xmm11[4],xmm5[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa 80(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm6 = xmm4[0],xmm5[1,2,3],xmm4[4],xmm5[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpackusdw %xmm5, %xmm7, %xmm5
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm7 = xmm6[0],xmm0[1,2,3],xmm6[4],xmm0[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm0[1,2,3],xmm9[4],xmm0[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm9, %xmm10
-; AVX1-ONLY-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpackusdw %xmm7, %xmm8, %xmm7
-; AVX1-ONLY-NEXT:    vpackusdw %xmm5, %xmm7, %xmm5
-; AVX1-ONLY-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 240(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0],xmm0[1,2,3],xmm7[4],xmm0[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm7, %xmm8
-; AVX1-ONLY-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 224(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, (%rsp) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm1[0],xmm0[1,2,3],xmm1[4],xmm0[5,6,7]
-; AVX1-ONLY-NEXT:    vpackusdw %xmm5, %xmm9, %xmm5
-; AVX1-ONLY-NEXT:    vmovdqa 208(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm1[0],xmm0[1,2,3],xmm1[4],xmm0[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm12 = xmm1[0],xmm0[1,2,3],xmm1[4],xmm0[5,6,7]
-; AVX1-ONLY-NEXT:    vpackusdw %xmm11, %xmm12, %xmm11
-; AVX1-ONLY-NEXT:    vpackusdw %xmm5, %xmm11, %xmm5
-; AVX1-ONLY-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm1[0],xmm0[1,2,3],xmm1[4],xmm0[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 160(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm13 = xmm9[0],xmm0[1,2,3],xmm9[4],xmm0[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm7 = xmm1[0],xmm5[1,2,3],xmm1[4],xmm5[5,6,7]
+; AVX1-ONLY-NEXT:    vpackusdw %xmm6, %xmm7, %xmm6
+; AVX1-ONLY-NEXT:    vpackusdw %xmm0, %xmm6, %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovdqa 32(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa 48(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm7 = xmm15[0],xmm5[1,2,3],xmm15[4],xmm5[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm8 = xmm1[0],xmm5[1,2,3],xmm1[4],xmm5[5,6,7]
+; AVX1-ONLY-NEXT:    vpackusdw %xmm7, %xmm8, %xmm7
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm8 = xmm2[0],xmm5[1,2,3],xmm2[4],xmm5[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm2, (%rsp) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm0[0],xmm5[1,2,3],xmm0[4],xmm5[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, %xmm12
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpackusdw %xmm8, %xmm9, %xmm8
+; AVX1-ONLY-NEXT:    vpackusdw %xmm7, %xmm8, %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 240(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm7 = xmm0[0],xmm5[1,2,3],xmm0[4],xmm5[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 224(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm8 = xmm0[0],xmm5[1,2,3],xmm0[4],xmm5[5,6,7]
+; AVX1-ONLY-NEXT:    vpackusdw %xmm7, %xmm8, %xmm7
+; AVX1-ONLY-NEXT:    vmovdqa 208(%rdi), %xmm13
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm8 = xmm13[0],xmm5[1,2,3],xmm13[4],xmm5[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm14
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm14[0],xmm5[1,2,3],xmm14[4],xmm5[5,6,7]
+; AVX1-ONLY-NEXT:    vpackusdw %xmm8, %xmm9, %xmm8
+; AVX1-ONLY-NEXT:    vpackusdw %xmm7, %xmm8, %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm0[0],xmm5[1,2,3],xmm0[4],xmm5[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, %xmm7
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 160(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm5[1,2,3],xmm0[4],xmm5[5,6,7]
+; AVX1-ONLY-NEXT:    vpackusdw %xmm9, %xmm10, %xmm10
+; AVX1-ONLY-NEXT:    vmovdqa 144(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm9[0],xmm5[1,2,3],xmm9[4],xmm5[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpackusdw %xmm5, %xmm13, %xmm5
-; AVX1-ONLY-NEXT:    vmovdqa 144(%rdi), %xmm11
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm14 = xmm11[0],xmm0[1,2,3],xmm11[4],xmm0[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1,2,3],xmm3[4],xmm0[5,6,7]
-; AVX1-ONLY-NEXT:    vpackusdw %xmm14, %xmm0, %xmm0
-; AVX1-ONLY-NEXT:    vpackusdw %xmm5, %xmm0, %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0],xmm5[1,2,3],xmm8[4],xmm5[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpackusdw %xmm0, %xmm5, %xmm0
+; AVX1-ONLY-NEXT:    vpackusdw %xmm10, %xmm0, %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm15[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = mem[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm11[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm12[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[1,3,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm14 = xmm15[0,2,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[1,3,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm14[0],xmm5[0],xmm14[1],xmm5[1]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm11[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[1,3,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm10[0],xmm5[0],xmm10[1],xmm5[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm5[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm15[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm14 = xmm4[0,2,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm14[0],xmm5[0],xmm14[1],xmm5[1]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm14 = xmm6[0,2,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[1,3,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm10[0,2,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[1,3,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1,2,3],xmm5[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm5, %ymm0
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm1[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,1,1,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm10[0],xmm5[0],xmm10[1],xmm5[1]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm2[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[1,3,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm12[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[1,3,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm10[0],xmm6[1],xmm10[1]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm8[0,2,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa (%rsp), %xmm13 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm13[0,2,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm12[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
-; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = mem[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm13[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[1,3,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm10[0,2,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[1,3,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm14[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[1,3,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm5[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm7[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm9[0,2,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm11[0,2,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[1,3,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm14 = xmm3[0,2,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[1,3,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm7 = xmm14[0],xmm7[0],xmm14[1],xmm7[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1,2,3],xmm5[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm5, %ymm0
+; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm6 = mem[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,1,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm9[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[1,3,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm8[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[1,3,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm10[0],xmm6[0],xmm10[1],xmm6[1]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm0 = mem[3,1,2,3]
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = mem[3,1,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,0,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,2,0,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
 ; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm2 = mem[3,1,2,3]
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,0,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm2[0,1,2,0,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm12[3,1,2,3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm15[3,1,2,3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[2,0,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm4[2,0,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm14 = mem[3,1,2,3]
-; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = mem[3,1,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm14[0,1,2,0,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm5[0,1,2,0,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm7 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm11[3,1,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,0,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm5[2,0,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm15[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm15 = xmm1[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm5[0,1,2,0,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm15[0,1,2,0,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm11 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vpshufd $231, (%rsp), %xmm6 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm6 = mem[3,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm4 = mem[3,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[2,0,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm4[2,0,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm7[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm4[2,0,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm11[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm3[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm11[0,1,2,0,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[0,1,2,0,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[2,0,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm14[2,0,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm7 = mem[3,1,2,3]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm8 = xmm13[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm8 = mem[3,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm7[0,1,2,0,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm8[0,1,2,0,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm8[0,1,2,0,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
 ; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm9 = mem[3,1,2,3]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[3,1,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm9[2,0,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm10[2,0,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm11[0],xmm2[0],xmm11[1],xmm2[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm11 = mem[3,1,2,3]
-; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm12 = mem[3,1,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm11[0,1,2,0,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm12[0,1,2,0,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
-; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm13 = mem[3,1,2,3]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,1,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm13[2,0,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm3[2,0,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
+; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm3 = mem[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm9[2,0,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm3[2,0,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm10[0],xmm0[0],xmm10[1],xmm0[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm15
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm0 = mem[0,1,3,1,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -1597,29 +1602,31 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    # xmm2 = mem[3,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm14[0,1,3,1,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm5[0,1,3,1,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm5[0,1,3,1,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm15[0,1,3,1,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[3,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm7[0,1,3,1,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm8[0,1,3,1,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm11[0,1,3,1,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[0,1,3,1,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm9[3,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm10[3,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[3,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm14[3,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm11[0,1,3,1,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm12[0,1,3,1,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm7[0,1,3,1,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm8[0,1,3,1,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm13[3,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm9[3,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %xmm2, 32(%rsi)
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
@@ -1632,12 +1639,12 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovaps %ymm2, 32(%rdx)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm2, (%rdx)
-; AVX1-ONLY-NEXT:    vmovaps %ymm15, 32(%rcx)
+; AVX1-ONLY-NEXT:    vmovaps %ymm10, 32(%rcx)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm2, (%rcx)
 ; AVX1-ONLY-NEXT:    vmovaps %ymm1, 32(%r8)
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, (%r8)
-; AVX1-ONLY-NEXT:    addq $296, %rsp # imm = 0x128
+; AVX1-ONLY-NEXT:    addq $232, %rsp
 ; AVX1-ONLY-NEXT:    vzeroupper
 ; AVX1-ONLY-NEXT:    retq
 ;
@@ -2255,118 +2262,134 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-SLOW-LABEL: load_i16_stride4_vf32:
 ; AVX512F-SLOW:       # %bb.0:
 ; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm1
-; AVX512F-SLOW-NEXT:    vpmovqw %ymm1, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm5[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm6[0,1,0,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = mem[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpmovqw %ymm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,5,0,5]
-; AVX512F-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm4, %ymm1, %ymm3
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm2, %xmm4
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm4
-; AVX512F-SLOW-NEXT:    vpmovqw %ymm4, %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm4[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm9[0,1,0,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = mem[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpmovqw %ymm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm8, %ymm1, %ymm7
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm0, %xmm8
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm7[0,1,2,3],zmm3[0,1,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm12
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm12[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm11
-; AVX512F-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm13
-; AVX512F-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm14
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm14[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm15[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm15[0],xmm6[0],xmm15[1],xmm6[1]
+; AVX512F-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm2
+; AVX512F-SLOW-NEXT:    vpmovqw %ymm2, %xmm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm6
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm6[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm5[0,1,0,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm7
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm7[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm9[0,1,0,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm1, %xmm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm3
+; AVX512F-SLOW-NEXT:    vpmovqw %ymm3, %xmm3
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm10
+; AVX512F-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm3
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm3[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm8[0,1,0,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm4
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm4[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm12[0,1,0,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm11 = xmm13[0],xmm11[0],xmm13[1],xmm11[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6,7]
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm0, %xmm11
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm10[0,1,2,3],zmm2[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm10
+; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm11
+; AVX512F-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm13
+; AVX512F-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm14
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm9[0],xmm5[0],xmm9[1],xmm5[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm14[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[1,3,2,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm13[0,2,2,3]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm15[1,3,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm11[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[1,3,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm6, %ymm1, %ymm3
-; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm2, %zmm6
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm9[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm10[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm9[0],xmm6[0],xmm9[1],xmm6[1]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm8[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm15[0],xmm9[0],xmm15[1],xmm9[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3,4,5],ymm5[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm1, %zmm9
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm9, %xmm9
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm12[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm11[0,2,2,3]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[1,3,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm7[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[1,3,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm6, %ymm1, %ymm9
-; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm0, %zmm6
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm3[0,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm5[0,1,2,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm14[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm9[0,1,2,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm10[0],xmm3[0],xmm10[1],xmm3[1]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm13[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm10[2,0,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm11[2,0,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm3, %ymm1, %ymm13
-; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm2, %zmm3
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm10[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[1,3,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm12[0],xmm9[0],xmm12[1],xmm9[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3,4,5],ymm8[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm0, %zmm9
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm9, %xmm9
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm6[0,1,2,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm7[0,1,2,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm12
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm14[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm8[2,0,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm13[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm9[2,0,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm13 = xmm13[0],xmm14[0],xmm13[1],xmm14[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3,4,5],ymm12[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm1, %zmm13
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm13, %xmm13
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm12
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm3[0,1,2,0,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm4[0,1,2,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm12[0,1,2,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm4[0,1,2,0,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm8[2,0,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm7[2,0,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm11[2,0,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm10[2,0,2,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm13, %ymm1, %ymm14
-; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm0, %zmm13
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm13[0,1,2,3],zmm3[0,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,3,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,1,3,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm9[0],xmm5[0],xmm9[1],xmm5[1]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm10[3,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm11[3,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm5, %ymm1, %ymm9
-; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm2, %zmm2
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3,4,5],ymm13[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm0, %zmm14
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm14, %xmm14
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm13[0,1,2,3],zmm12[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,3,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm8[3,1,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm9[3,1,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5],ymm6[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm1, %zmm1
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm1, %xmm1
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,3,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm12[0,1,3,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm8[3,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm4, %ymm1, %ymm5
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm11[3,1,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm10[3,1,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
 ; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm0, %zmm0
 ; AVX512F-SLOW-NEXT:    vpmovqw %zmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[0,1,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm16, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm6, (%rdx)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm3, (%rcx)
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, (%rsi)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm5, (%rdx)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm12, (%rcx)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, (%r8)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
@@ -2388,6 +2411,7 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpermt2d %ymm7, %ymm11, %ymm10
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm1, %xmm7
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
 ; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %ymm10
 ; AVX512F-FAST-NEXT:    vpermd %ymm10, %ymm4, %ymm12
 ; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm12, %ymm13
@@ -2397,7 +2421,7 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpermt2d %ymm13, %ymm11, %ymm4
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm0, %xmm13
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm13[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm4[0,1,2,3],zmm7[0,1,2,3]
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm4[0,1,2,3],zmm7[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31>
 ; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm5, %ymm13
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u>
@@ -2406,13 +2430,14 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpsrlq $16, %zmm1, %zmm13
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm13, %xmm13
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm13[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm9
 ; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm12, %ymm12
 ; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm15, %ymm13
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3,4,5],ymm12[6,7]
 ; AVX512F-FAST-NEXT:    vpsrlq $16, %zmm0, %zmm13
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm13, %xmm13
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm12[0,1,2,3],zmm9[0,1,2,3]
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm12[0,1,2,3],zmm9[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [1,3,2,3,1,3,5,7]
 ; AVX512F-FAST-NEXT:    vpermd %ymm6, %ymm12, %ymm6
 ; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm13
@@ -2422,6 +2447,7 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpsrlq $32, %zmm1, %zmm13
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm13, %xmm13
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm15[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm13
 ; AVX512F-FAST-NEXT:    vpermd %ymm10, %ymm12, %ymm10
 ; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm10, %ymm2
 ; AVX512F-FAST-NEXT:    vpermd %ymm14, %ymm12, %ymm12
@@ -2430,20 +2456,21 @@ define void @load_i16_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpsrlq $32, %zmm0, %zmm2
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm2, %xmm2
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm13[0,1,2,3]
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm13[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm6, %ymm3
 ; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm8, %ymm6
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5],ymm3[6,7]
 ; AVX512F-FAST-NEXT:    vpsrlq $48, %zmm1, %zmm1
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm1, %xmm1
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
 ; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm10, %ymm3
 ; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm12, %ymm4
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
 ; AVX512F-FAST-NEXT:    vpsrlq $48, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,2,3]
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, (%rsi)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, (%rdx)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, (%rcx)
@@ -3045,199 +3072,202 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX1-ONLY-LABEL: load_i16_stride4_vf64:
 ; AVX1-ONLY:       # %bb.0:
-; AVX1-ONLY-NEXT:    subq $808, %rsp # imm = 0x328
-; AVX1-ONLY-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX1-ONLY-NEXT:    vmovdqa 368(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm5[0],xmm4[1,2,3],xmm5[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 352(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0],xmm4[1,2,3],xmm6[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    subq $824, %rsp # imm = 0x338
+; AVX1-ONLY-NEXT:    vpxor %xmm7, %xmm7, %xmm7
+; AVX1-ONLY-NEXT:    vmovdqa 368(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0],xmm7[1,2,3],xmm4[4],xmm7[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 352(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm7[1,2,3],xmm1[4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm0, %xmm1, %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa 336(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa 320(%rdi), %xmm13
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm13[0],xmm4[1,2,3],xmm13[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm7[1,2,3],xmm1[4],xmm7[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 320(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm7[1,2,3],xmm2[4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm1, %xmm2, %xmm1
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm0, %xmm1, %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 304(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa 288(%rdi), %xmm14
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm14[0],xmm4[1,2,3],xmm14[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 304(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm15[0],xmm7[1,2,3],xmm15[4],xmm7[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 288(%rdi), %xmm13
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm13[0],xmm7[1,2,3],xmm13[4],xmm7[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm0, %xmm1, %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa 272(%rdi), %xmm15
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm15[0],xmm4[1,2,3],xmm15[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm15, (%rsp) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 256(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 272(%rdi), %xmm14
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm14[0],xmm7[1,2,3],xmm14[4],xmm7[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 256(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm7[1,2,3],xmm3[4],xmm7[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm1, %xmm2, %xmm1
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm0, %xmm1, %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 112(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vpackusdw %xmm0, %xmm1, %xmm1
-; AVX1-ONLY-NEXT:    vmovdqa 80(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm7 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vpackusdw %xmm3, %xmm7, %xmm3
-; AVX1-ONLY-NEXT:    vpackusdw %xmm1, %xmm3, %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm7[1,2,3],xmm0[4],xmm7[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 32(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vmovdqa 48(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm7[1,2,3],xmm1[4],xmm7[5,6,7]
+; AVX1-ONLY-NEXT:    vpackusdw %xmm0, %xmm1, %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa 80(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm8 = xmm7[0],xmm4[1,2,3],xmm7[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpackusdw %xmm3, %xmm8, %xmm9
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm1[0],xmm7[1,2,3],xmm1[4],xmm7[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm6 = xmm1[0],xmm7[1,2,3],xmm1[4],xmm7[5,6,7]
+; AVX1-ONLY-NEXT:    vpackusdw %xmm5, %xmm6, %xmm5
+; AVX1-ONLY-NEXT:    vpackusdw %xmm0, %xmm5, %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 32(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 48(%rdi), %xmm5
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm5[0],xmm7[1,2,3],xmm5[4],xmm7[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm8 = xmm1[0],xmm7[1,2,3],xmm1[4],xmm7[5,6,7]
+; AVX1-ONLY-NEXT:    vpackusdw %xmm0, %xmm8, %xmm9
 ; AVX1-ONLY-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, %xmm3
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm1[0],xmm7[1,2,3],xmm1[4],xmm7[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm0[0],xmm7[1,2,3],xmm0[4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm10, %xmm11, %xmm10
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm9, %xmm10, %xmm9
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 240(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa 224(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm8[0],xmm4[1,2,3],xmm8[4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 240(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm8[0],xmm7[1,2,3],xmm8[4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 224(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm7[1,2,3],xmm0[4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm9, %xmm10, %xmm9
 ; AVX1-ONLY-NEXT:    vmovdqa 208(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm7[1,2,3],xmm0[4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm0[0],xmm7[1,2,3],xmm0[4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm10, %xmm11, %xmm10
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm9, %xmm10, %xmm9
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa 160(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm6[0],xmm7[1,2,3],xmm6[4],xmm7[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 160(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm6[0],xmm7[1,2,3],xmm6[4],xmm7[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm9, %xmm10, %xmm9
 ; AVX1-ONLY-NEXT:    vmovdqa 144(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm7[1,2,3],xmm0[4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm12[0],xmm4[1,2,3],xmm12[4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm12[0],xmm7[1,2,3],xmm12[4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm10, %xmm11, %xmm10
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm9, %xmm10, %xmm9
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 496(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm0[0],xmm7[1,2,3],xmm0[4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa 480(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm7[1,2,3],xmm0[4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm9, %xmm10, %xmm9
 ; AVX1-ONLY-NEXT:    vmovdqa 464(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm7[1,2,3],xmm0[4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa 448(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm0[0],xmm7[1,2,3],xmm0[4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm10, %xmm11, %xmm10
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm9, %xmm10, %xmm9
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 432(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm0[0],xmm7[1,2,3],xmm0[4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa 416(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm7[1,2,3],xmm0[4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vpackusdw %xmm9, %xmm10, %xmm9
 ; AVX1-ONLY-NEXT:    vmovdqa 400(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm0[0],xmm7[1,2,3],xmm0[4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa 384(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vpackusdw %xmm10, %xmm4, %xmm4
-; AVX1-ONLY-NEXT:    vpackusdw %xmm9, %xmm4, %xmm4
-; AVX1-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm5[0,2,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm6[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm7 = xmm0[0],xmm7[1,2,3],xmm0[4],xmm7[5,6,7]
+; AVX1-ONLY-NEXT:    vpackusdw %xmm10, %xmm7, %xmm7
+; AVX1-ONLY-NEXT:    vpackusdw %xmm9, %xmm7, %xmm7
+; AVX1-ONLY-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm9 = mem[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm7 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
 ; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm9 = mem[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[1,3,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm13[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm10 = mem[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[1,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm9[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm9 = mem[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm7 = xmm9[0,1,2,3],xmm7[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm15[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm14[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm13[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,1,1,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm15[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm14[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[1,3,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm11 = mem[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm3[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[1,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1,2,3],xmm9[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm9, %ymm4
-; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm5[0,2,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, %xmm1
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm2[0,2,2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa (%rsp), %xmm3 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm3[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm4[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm2[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm7 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm1[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[1,3,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm10 = mem[0,2,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm2[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[1,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm9[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm9 = mem[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm7 = xmm9[0,1,2,3],xmm7[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm5[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm7[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm10 = mem[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,1,1,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm3[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm10 = mem[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[1,3,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm3[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm11 = mem[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[1,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1,2,3],xmm9[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm9, %ymm4
-; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = mem[0,2,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm8[0,2,2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm8[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm9 = mem[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm7 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
 ; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm9 = mem[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[1,3,2,3,4,5,6,7]
@@ -3245,12 +3275,11 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    # xmm10 = mem[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[1,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm9[0,1,2,3],xmm4[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm7 = xmm9[0,1,2,3],xmm7[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm9 = mem[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm10 = mem[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm6[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,1,1,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
 ; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
@@ -3260,75 +3289,78 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[1,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1,2,3],xmm9[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm9, %ymm0
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm7[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm7[0,2,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm8[0,2,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm5[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm13[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm6[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm7 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm14[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[1,3,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm12[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm10 = mem[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[1,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm9[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm13[0,2,2,3]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm7 = xmm9[0,1,2,3],xmm7[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm12[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,1,1,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm14[0,2,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm6[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,1,1,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm15[0,2,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm8[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[1,3,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm11 = mem[0,2,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm15[0,2,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[1,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1,2,3],xmm9[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm9, %ymm0
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm7[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm5[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm3[3,1,2,3]
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm0[0,1,2,0,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm1[0,1,2,0,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm4, (%rsp) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[0,1,2,0,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,2,0,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = mem[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,1,2,3]
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,0,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[2,0,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,0,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[3,1,2,3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = mem[3,1,2,3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,2,0,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm4[0,1,2,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = mem[3,1,2,3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm0 = mem[3,1,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm3 = mem[3,1,2,3]
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,0,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[2,0,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[0,1,2,0,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,2,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm0 = mem[3,1,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = mem[3,1,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,0,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm4[2,0,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm0 = mem[3,1,2,3]
@@ -3358,47 +3390,48 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,2,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[0,1,2,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vpshufd $231, (%rsp), %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm2 = mem[3,1,2,3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, (%rsp) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm11 = mem[3,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,0,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm11[2,0,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm7[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm5[3,1,2,3]
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm8[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm13[3,1,2,3]
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[0,1,2,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm6[3,1,2,3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm12[3,1,2,3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[2,0,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,0,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = mem[3,1,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm14[2,0,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,0,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm13[3,1,2,3]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm14[3,1,2,3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[0,1,2,0,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, %xmm14
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[0,1,2,0,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm12[3,1,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm6[3,1,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,2,0,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,2,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm12 = xmm15[3,1,2,3]
-; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm13 = mem[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm12 = xmm8[3,1,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm15[3,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[2,0,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm13[2,0,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufd $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm10 = mem[3,1,2,3]
@@ -3430,11 +3463,12 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm2[2,0,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm0 = mem[0,1,3,1,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vpshuflw $116, (%rsp), %xmm1 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1,3,1,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; AVX1-ONLY-NEXT:    vpshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -3454,7 +3488,8 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    # xmm15 = mem[3,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm15[0],xmm3[0],xmm15[1],xmm3[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm15
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm15 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm0 = mem[0,1,3,1,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
@@ -3471,12 +3506,13 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm3 = mem[0,1,3,1,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vpshuflw $231, (%rsp), %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vpshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm3 = mem[3,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[3,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm11[0],xmm3[0],xmm11[1],xmm3[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm3
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm10[0,1,3,1,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm9[0,1,3,1,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
@@ -3491,19 +3527,20 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1,3,1,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm2 = mem[0,1,3,1,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vpshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = mem[3,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm14[3,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm4 = mem[3,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm14[0,1,3,1,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = mem[0,1,3,1,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm4 = mem[0,1,3,1,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
@@ -3511,7 +3548,8 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm13[3,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %xmm2, 96(%rsi)
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
@@ -3548,7 +3586,7 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 32(%r8)
 ; AVX1-ONLY-NEXT:    vmovaps %ymm3, 64(%r8)
 ; AVX1-ONLY-NEXT:    vmovaps %ymm15, (%r8)
-; AVX1-ONLY-NEXT:    addq $808, %rsp # imm = 0x328
+; AVX1-ONLY-NEXT:    addq $824, %rsp # imm = 0x338
 ; AVX1-ONLY-NEXT:    vzeroupper
 ; AVX1-ONLY-NEXT:    retq
 ;
@@ -4888,250 +4926,292 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512F-SLOW-LABEL: load_i16_stride4_vf64:
 ; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    subq $136, %rsp
-; AVX512F-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm25
-; AVX512F-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm26
-; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm23
-; AVX512F-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm22
-; AVX512F-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm0
-; AVX512F-SLOW-NEXT:    vpmovqw %ymm0, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm13
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm13[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[0,1,0,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = mem[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpmovqw %ymm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,5,0,5]
-; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm3, %ymm2, %ymm1
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm22, %xmm3
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm1
-; AVX512F-SLOW-NEXT:    vpmovqw %ymm1, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm11
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm11[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm1[0,1,0,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = mem[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpmovqw %ymm6, %xmm6
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm5, %ymm2, %ymm4
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm23, %xmm5
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm4[0,1,2,3],zmm3[0,1,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm3
-; AVX512F-SLOW-NEXT:    vpmovqw %ymm3, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa 496(%rdi), %xmm14
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm14[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm7[0,1,0,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = mem[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpmovqw %ymm5, %xmm5
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm4, %ymm2, %ymm3
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm26, %xmm4
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm4
-; AVX512F-SLOW-NEXT:    vpmovqw %ymm4, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 368(%rdi), %xmm24
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm24[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm6[0,1,0,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = mem[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpmovqw %ymm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm8[0],xmm5[0],xmm8[1],xmm5[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm5, %ymm2, %ymm4
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm25, %xmm5
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm4[0,1,2,3],zmm3[0,1,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 320(%rdi), %xmm30
-; AVX512F-SLOW-NEXT:    vmovdqa64 336(%rdi), %xmm31
-; AVX512F-SLOW-NEXT:    vmovdqa 352(%rdi), %xmm15
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm15[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 448(%rdi), %xmm29
-; AVX512F-SLOW-NEXT:    vmovdqa64 464(%rdi), %xmm17
-; AVX512F-SLOW-NEXT:    vmovdqa64 480(%rdi), %xmm18
+; AVX512F-SLOW-NEXT:    subq $88, %rsp
+; AVX512F-SLOW-NEXT:    vmovdqa64 240(%rdi), %xmm26
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm26[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm2[0,1,0,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm14
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm14[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm8[0,1,0,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm22 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX512F-SLOW-NEXT:    vmovdqa64 112(%rdi), %xmm18
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm18[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 64(%rdi), %xmm19
-; AVX512F-SLOW-NEXT:    vmovdqa64 80(%rdi), %xmm20
-; AVX512F-SLOW-NEXT:    vmovdqa64 96(%rdi), %xmm21
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm21[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 208(%rdi), %xmm27
-; AVX512F-SLOW-NEXT:    vmovdqa64 224(%rdi), %xmm28
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm28[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm10[0],xmm0[0],xmm10[1],xmm0[1]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm27[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[1,3,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm3[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[1,3,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm0, %ymm2, %ymm10
-; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm22, %zmm0
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm20[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[1,3,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm19[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[1,3,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm10[0],xmm5[0],xmm10[1],xmm5[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm1, %ymm2, %ymm5
-; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm23, %zmm1
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[0,1,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm7[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm4[0,1,0,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm11
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm11[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm9[0,1,0,2,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm17[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[1,3,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm29[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa64 496(%rdi), %xmm16
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm16[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm5[0,1,0,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 480(%rdi), %xmm17
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm17[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm10[0,1,0,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 368(%rdi), %xmm7
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm7[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm13[0,1,0,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa 352(%rdi), %xmm6
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm6[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm12[0,1,0,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa64 320(%rdi), %xmm20
+; AVX512F-SLOW-NEXT:    vmovdqa 336(%rdi), %xmm15
+; AVX512F-SLOW-NEXT:    vmovdqa64 448(%rdi), %xmm19
+; AVX512F-SLOW-NEXT:    vmovdqa64 464(%rdi), %xmm21
+; AVX512F-SLOW-NEXT:    vmovdqa64 64(%rdi), %xmm27
+; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm1
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm8[0],xmm2[0],xmm8[1],xmm2[1]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm0[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[1,3,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm8[0],xmm2[0],xmm8[1],xmm2[1]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, (%rsp) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm4[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm9[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm27[0,2,2,3]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,3,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm0, %ymm2, %ymm1
-; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm26, %zmm0
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm6[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm8[0,1,1,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm31[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm28 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm5[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm10[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm21[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm19[0,2,2,3]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,3,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm30[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[1,3,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm1, %ymm2, %ymm4
-; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm25, %zmm1
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[0,1,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm13[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm12[0,1,1,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm15[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[1,3,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm20[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,3,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm24 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm26[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm5[0,1,2,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm14[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,2,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm1[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm10[2,0,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,0,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm30 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm18[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm11[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm8[0,1,2,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm23 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm3[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm9[2,0,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm31
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm27[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm9[2,0,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm27 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm16[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm17[3,1,2,3]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm1[0,1,2,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm16
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm28[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm9[0,1,2,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm27[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[2,0,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm27
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm4[2,0,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm28
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm0, %ymm2, %ymm1
-; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm22, %zmm0
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm11[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm7[0,1,2,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm21[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm11[0,1,2,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm20[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm6[2,0,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm19[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm8[2,0,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm10[0],xmm3[0],xmm10[1],xmm3[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm1, %ymm2, %ymm3
-; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm23, %zmm1
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm1[0,1,2,3],zmm0[0,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm14[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm10[0,1,2,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm18[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm12[0,1,2,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm29
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[0,1,2,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm26
 ; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm18 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm17[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[2,0,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm20
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm24[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm5[0,1,2,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm15[0,1,2,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm17 = xmm14[0],xmm0[0],xmm14[1],xmm0[1]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm29[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm14[2,0,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm18, %ymm2, %ymm0
-; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm26, %zmm1
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm31[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm4[2,0,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm30[3,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm3[2,0,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm17, %ymm2, %ymm0
-; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm25, %zmm13
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm21[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm19[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm1[2,0,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm25
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[2,0,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm17 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm6[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm7[0,1,2,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[0,1,2,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm16 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm15[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[2,0,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm20[3,1,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm15[2,0,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm20 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX512F-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm0
+; AVX512F-SLOW-NEXT:    vpmovqw %ymm0, %xmm0
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm1
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm0
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm0, %xmm2
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm2
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm1
+; AVX512F-SLOW-NEXT:    vpmovqw %ymm1, %xmm1
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-SLOW-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm1[0,1,2,3,4,5],ymm14[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm1, %xmm13
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm13[0,1,2,3],zmm2[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa 448(%rdi), %ymm2
+; AVX512F-SLOW-NEXT:    vpmovqw %ymm2, %xmm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm2[0,1,2,3,4,5],ymm13[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm2
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm2, %xmm14
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm14
+; AVX512F-SLOW-NEXT:    vpmovqw %ymm14, %xmm14
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512F-SLOW-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm14[0,1,2,3,4,5],ymm12[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm21
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm21, %xmm14
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm14[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm13
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm12[0,1,2,3],zmm13[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vinserti128 $1, (%rsp), %ymm0, %ymm13 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3,4,5],ymm12[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm0, %zmm13
 ; AVX512F-SLOW-NEXT:    vpmovqw %zmm13, %xmm13
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm0[0,1,2,3],zmm1[0,1,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm9[0,1,3,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm7[0,1,3,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm11[0,1,3,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm7
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm9
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm28, %ymm0, %ymm14
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3,4,5],ymm13[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm1, %zmm14
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm14, %xmm14
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm12
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm28 = zmm13[0,1,2,3],zmm12[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3,4,5],ymm12[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm2, %zmm13
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm13, %xmm13
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm24, %ymm0, %ymm14
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3,4,5],ymm13[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm21, %zmm14
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm14, %xmm14
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm12
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm13[0,1,2,3],zmm12[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm5[0,1,3,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # xmm14 = mem[0,1,3,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm30, %ymm0, %ymm12
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5],ymm14[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm0, %zmm14
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm14, %xmm14
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm14[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm12
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm10[3,1,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # xmm11 = mem[3,1,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm11 = xmm11[0],xmm14[0],xmm11[1],xmm14[1]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm23, %ymm0, %ymm14
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm27, %ymm0, %ymm10
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm14[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm1, %zmm14
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm14, %xmm14
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm14[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm10[0,1,2,3],zmm12[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw $116, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # xmm12 = mem[0,1,3,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm8[0,1,3,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm12 = xmm14[0],xmm12[0],xmm14[1],xmm12[1]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm18, %ymm0, %ymm14
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm14[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm2, %zmm14
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm14, %xmm14
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm5
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm5[3,1,2,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[3,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm7 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm0, %ymm2, %ymm7
-; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm22, %zmm0
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm14[0],xmm9[1],xmm14[1]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm16, %ymm0, %ymm14
+; AVX512F-SLOW-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm5
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm14[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm21, %zmm14
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm14, %xmm14
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm14[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm8[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm8
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,1,3,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm14
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[0,1,3,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm8 = xmm14[0],xmm8[0],xmm14[1],xmm8[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm13[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm0, %zmm0
 ; AVX512F-SLOW-NEXT:    vpmovqw %zmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[3,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm8[3,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm1, %ymm2, %ymm6
-; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm23, %zmm1
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[0,1,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm10[0,1,3,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm12[0,1,3,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,3,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm15[0,1,3,1,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm6
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[3,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm14[3,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm1, %ymm2, %ymm6
-; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm26, %zmm1
-; AVX512F-SLOW-NEXT:    vpmovqw %zmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm11
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[3,1,2,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; AVX512F-SLOW-NEXT:    vpermt2q %ymm5, %ymm2, %ymm3
-; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm25, %zmm2
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm11
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm11[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm1, %zmm1
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm1, %xmm1
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm7[0,1,3,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,3,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm2, %zmm2
 ; AVX512F-SLOW-NEXT:    vpmovqw %zmm2, %xmm2
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm2[0,1,2,3],zmm1[0,1,2,3]
-; AVX512F-SLOW-NEXT:    vmovups (%rsp), %zmm2 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vmovaps %zmm2, 64(%rsi)
-; AVX512F-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vmovaps %zmm2, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vmovaps %zmm2, 64(%rdx)
-; AVX512F-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vmovaps %zmm2, (%rdx)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm17, 64(%rcx)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm19, (%rcx)
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm6[3,1,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm15[3,1,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
+; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm21, %zmm3
+; AVX512F-SLOW-NEXT:    vpmovqw %zmm3, %xmm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm2[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm19, 64(%rsi)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm22, (%rsi)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm24, 64(%rdx)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm28, (%rdx)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm5, 64(%rcx)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm10, (%rcx)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm1, 64(%r8)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, (%r8)
-; AVX512F-SLOW-NEXT:    addq $136, %rsp
+; AVX512F-SLOW-NEXT:    addq $88, %rsp
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
@@ -5154,6 +5234,7 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpermt2d %ymm0, %ymm7, %ymm3
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm4, %xmm0
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqa64 96(%rdi), %ymm27
 ; AVX512F-FAST-NEXT:    vpermd %ymm27, %ymm1, %ymm3
 ; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm3, %ymm9
@@ -5163,7 +5244,7 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpermt2d %ymm9, %ymm7, %ymm12
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm30, %xmm9
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm12[4,5,6,7]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm9[0,1,2,3],zmm0[0,1,2,3]
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm9[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa64 480(%rdi), %ymm16
 ; AVX512F-FAST-NEXT:    vpermd %ymm16, %ymm1, %ymm0
 ; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm9
@@ -5173,6 +5254,7 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpermt2d %ymm9, %ymm7, %ymm13
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm26, %xmm9
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm9
 ; AVX512F-FAST-NEXT:    vmovdqa64 352(%rdi), %ymm18
 ; AVX512F-FAST-NEXT:    vpermd %ymm18, %ymm1, %ymm13
 ; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm13, %ymm14
@@ -5182,7 +5264,7 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpermt2d %ymm14, %ymm7, %ymm15
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm23, %xmm14
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm14[0,1,2,3],zmm9[0,1,2,3]
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm14[0,1,2,3],zmm9[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31>
 ; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm10, %ymm14
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <2,3,6,7,10,11,14,15,14,15,10,11,12,13,14,15,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u>
@@ -5191,26 +5273,28 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpsrlq $16, %zmm4, %zmm14
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm14, %xmm14
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm14[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm11
 ; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm3, %ymm3
 ; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm8, %ymm8
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm8[0,1,2,3,4,5],ymm3[6,7]
 ; AVX512F-FAST-NEXT:    vpsrlq $16, %zmm30, %zmm8
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm8, %xmm8
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm8[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm29 = zmm3[0,1,2,3],zmm11[0,1,2,3]
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm29 = zmm3[0,1,2,3],zmm11[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm12, %ymm3
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
 ; AVX512F-FAST-NEXT:    vpsrlq $16, %zmm26, %zmm3
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm3, %xmm3
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm13, %ymm3
 ; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6,7]
 ; AVX512F-FAST-NEXT:    vpsrlq $16, %zmm23, %zmm3
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm3, %xmm3
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm1[0,1,2,3],zmm0[0,1,2,3]
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm1[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [1,3,2,3,1,3,5,7]
 ; AVX512F-FAST-NEXT:    vpermd %ymm24, %ymm15, %ymm3
 ; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm3, %ymm0
@@ -5219,7 +5303,8 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpermt2d %ymm0, %ymm7, %ymm1
 ; AVX512F-FAST-NEXT:    vpsrlq $32, %zmm4, %zmm0
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm1
 ; AVX512F-FAST-NEXT:    vpermd %ymm27, %ymm15, %ymm0
 ; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm13
 ; AVX512F-FAST-NEXT:    vpermd %ymm28, %ymm15, %ymm12
@@ -5228,7 +5313,7 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpsrlq $32, %zmm30, %zmm13
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm13, %xmm13
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm13[0,1,2,3],zmm1[0,1,2,3]
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm13[0,1,2,3],zmm1[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpermd %ymm16, %ymm15, %ymm13
 ; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm13, %ymm1
 ; AVX512F-FAST-NEXT:    vpermd %ymm17, %ymm15, %ymm14
@@ -5236,7 +5321,8 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpermt2d %ymm1, %ymm7, %ymm11
 ; AVX512F-FAST-NEXT:    vpsrlq $32, %zmm26, %zmm1
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm1, %xmm1
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm1[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm11
 ; AVX512F-FAST-NEXT:    vpermd %ymm18, %ymm15, %ymm1
 ; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm2
 ; AVX512F-FAST-NEXT:    vpermd %ymm20, %ymm15, %ymm5
@@ -5245,33 +5331,35 @@ define void @load_i16_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpsrlq $32, %zmm23, %zmm2
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm2, %xmm2
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm11[0,1,2,3]
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm11[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm3, %ymm3
 ; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm8, %ymm6
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5],ymm3[6,7]
 ; AVX512F-FAST-NEXT:    vpsrlq $48, %zmm4, %zmm4
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm4, %xmm4
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
 ; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm12, %ymm4
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
 ; AVX512F-FAST-NEXT:    vpsrlq $48, %zmm30, %zmm4
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm4, %xmm4
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm3[0,1,2,3]
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm3[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm13, %ymm3
 ; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm14, %ymm4
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm3[6,7]
 ; AVX512F-FAST-NEXT:    vpsrlq $48, %zmm26, %zmm4
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm4, %xmm4
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
 ; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm1, %ymm1
 ; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm5, %ymm4
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7]
 ; AVX512F-FAST-NEXT:    vpsrlq $48, %zmm23, %zmm4
 ; AVX512F-FAST-NEXT:    vpmovqw %zmm4, %xmm4
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm3[0,1,2,3]
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm3[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm22, 64(%rsi)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm21, (%rsi)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm19, 64(%rdx)

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
index 013ad75a32cba..27c81b579dae9 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll
@@ -1190,7 +1190,7 @@ define void @load_i16_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm15[1,2,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm15 = xmm5[0,1],xmm6[2,3],xmm5[4,5],xmm6[6,7]
-; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm15 = xmm15[u,u,u,u,u,u,0,1,10,11,4,5,14,15,u,u]
+; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm15 = xmm15[u,u,u,u,u,u,0,1,10,11,4,5,14,15,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2],xmm15[3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vandps %ymm11, %ymm14, %ymm11
 ; AVX1-ONLY-NEXT:    vorps %ymm13, %ymm11, %ymm11
@@ -1511,8 +1511,8 @@ define void @load_i16_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,4,5,14,15,24,25,18,19,28,29,22,23,u,u,u,u,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm1 = xmm4[u,u,u,u,4,5,14,15,u,u,u,u,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm2 = xmm5[u,u,u,u,0,1,10,11,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm1 = xmm4[12,13,14,15,4,5,14,15,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm2 = xmm5[0,1,2,3,0,1,10,11,u,u,u,u,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
@@ -2620,7 +2620,7 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqa 288(%rdi), %xmm3
 ; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm13, %ymm11, %ymm10
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm11 = xmm3[0],xmm15[1],xmm3[2,3]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,u,u,u,u,2,3,12,13,6,7>
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = [6,7,2,3,4,5,6,7,6,7,2,3,12,13,6,7]
 ; AVX2-SLOW-NEXT:    vpshufb %xmm0, %xmm11, %xmm11
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm9[0,1,2,3,4],ymm11[5,6,7],ymm9[8,9,10,11,12],ymm11[13,14,15]
@@ -2636,7 +2636,7 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm3[0,1],xmm15[2],xmm3[3]
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm3, %xmm12
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,u,u,u,u,4,5,14,15,8,9>
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,2,3,4,5,6,7,8,9,4,5,14,15,8,9]
 ; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm14[0,1,2,3,4],ymm0[5,6,7],ymm14[8,9,10,11,12],ymm0[13,14,15]
@@ -2664,7 +2664,7 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpshufb %xmm14, %xmm9, %xmm9
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2],ymm0[3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm15[0],xmm3[1],xmm15[2,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,u,u,6,7,0,1,10,11]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[0,1,6,7,4,5,6,7,8,9,6,7,0,1,10,11]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm0[0,1,2,3,4],ymm9[5,6,7],ymm0[8,9,10,11,12],ymm9[13,14,15]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
@@ -2683,7 +2683,7 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpshufb %xmm14, %xmm9, %xmm9
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2],ymm0[3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm13[0],xmm7[1],xmm13[2,3]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,u,u,6,7,0,1,10,11]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[0,1,6,7,4,5,6,7,8,9,6,7,0,1,10,11]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm0[0,1,2,3,4],ymm9[5,6,7],ymm0[8,9,10,11,12],ymm9[13,14,15]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
@@ -2702,7 +2702,7 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2],ymm0[3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm15[0,1],xmm12[2],xmm15[3]
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm12, %xmm11
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,u,u,u,u,8,9,2,3,12,13>
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,9,8,9,2,3,12,13]
 ; AVX2-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm9
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm0[0,1,2,3,4],ymm9[5,6,7],ymm0[8,9,10,11,12],ymm9[13,14,15]
@@ -2733,7 +2733,7 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3,4],xmm5[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,u,u,u,u,4,5,14,15,24,25,18,19,28,29,22,23,u,u,u,u,u,u,u,u>
 ; AVX2-SLOW-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u>
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,9,2,3,12,13,6,7,0,1,10,11,0,1,6,7]
 ; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm4, %xmm4
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2],ymm2[3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm15[3,1,2,3]
@@ -2951,7 +2951,7 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpermd %ymm0, %ymm2, %ymm0
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,u,u,u,u,0,1,6,7,16,17,22,23,24,25,30,31,u,u,u,u,u,u,u,u>
 ; AVX2-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u>
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [8,9,2,3,12,13,6,7,0,1,10,11,0,1,6,7]
 ; AVX2-FAST-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,2,1,3,0,2,5,7]
@@ -3059,7 +3059,7 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 288(%rdi), %xmm9
 ; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm1, %ymm14, %ymm15, %ymm14
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm1 = xmm9[0],xmm8[1],xmm9[2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,u,u,u,u,u,u,u,u,2,3,12,13,6,7>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm15 = [6,7,2,3,4,5,6,7,6,7,2,3,12,13,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm15, %xmm1, %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm7[0,1,2,3,4],ymm1[5,6,7],ymm7[8,9,10,11,12],ymm1[13,14,15]
@@ -3074,7 +3074,7 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm11 = xmm9[0,1],xmm8[2],xmm9[3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,u,u,u,u,u,4,5,14,15,8,9>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm12 = [0,1,2,3,4,5,6,7,8,9,4,5,14,15,8,9]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm12, %xmm11, %xmm11
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm11 = ymm13[0,1,2,3,4],ymm11[5,6,7],ymm13[8,9,10,11,12],ymm11[13,14,15]
@@ -3102,7 +3102,7 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm15, %xmm12, %xmm12
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm11 = ymm12[0,1,2],ymm11[3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm12 = xmm8[0],xmm9[1],xmm8[2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,u,u,u,u,6,7,0,1,10,11>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,6,7,4,5,6,7,8,9,6,7,0,1,10,11]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm12, %xmm12
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm12 = ymm11[0,1,2,3,4],ymm12[5,6,7],ymm11[8,9,10,11,12],ymm12[13,14,15]
@@ -3136,7 +3136,7 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm13, %xmm12, %xmm12
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm11 = ymm12[0,1,2],ymm11[3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm12 = xmm8[0,1],xmm9[2],xmm8[3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,u,u,u,u,8,9,2,3,12,13>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,9,8,9,2,3,12,13]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm12, %xmm12
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm12 = ymm11[0,1,2,3,4],ymm12[5,6,7],ymm11[8,9,10,11,12],ymm12[13,14,15]
@@ -3170,7 +3170,7 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3,4],xmm5[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,u,u,u,u,4,5,14,15,24,25,18,19,28,29,22,23,u,u,u,u,u,u,u,u>
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm6 = <8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,9,2,3,12,13,6,7,0,1,10,11,0,1,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm4, %xmm4
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2],ymm1[3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -3244,7 +3244,7 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm2[0],ymm3[1,2],ymm2[3],ymm3[4],ymm2[5],ymm3[6,7],ymm2[8],ymm3[9,10],ymm2[11],ymm3[12],ymm2[13],ymm3[14,15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm11[2,3,0,1]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4],ymm12[5],ymm11[6],ymm12[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[6,7,16,17,26,27,20,21,30,31,24,25,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[6,7,16,17,26,27,20,21,30,31,24,25],zero,zero,zero,zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm5[0],ymm6[1],ymm5[2,3],ymm6[4],ymm5[5],ymm6[6],ymm5[7,8],ymm6[9],ymm5[10,11],ymm6[12],ymm5[13],ymm6[14],ymm5[15]
 ; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm13
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm13[1,2,3],xmm12[4,5],xmm13[6,7]
@@ -3277,7 +3277,7 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm13 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5],ymm2[6],ymm3[7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13],ymm2[14],ymm3[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm13[2,3,0,1]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4],ymm15[5],ymm13[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[8,9,18,19,28,29,22,23,16,17,26,27,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[8,9,18,19,28,29,22,23,16,17,26,27],zero,zero,zero,zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm6[0],ymm5[1],ymm6[2],ymm5[3],ymm6[4,5],ymm5[6],ymm6[7,8],ymm5[9],ymm6[10],ymm5[11],ymm6[12,13],ymm5[14],ymm6[15]
 ; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm14
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0,1],xmm14[2,3],xmm15[4,5,6],xmm14[7]
@@ -3310,7 +3310,7 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5],ymm3[6],ymm2[7,8],ymm3[9],ymm2[10,11],ymm3[12],ymm2[13],ymm3[14],ymm2[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm15[2,3,0,1]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm15[0,1,2,3,4,5],ymm13[6],ymm15[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,0,1,10,11,20,21,30,31,24,25,18,19,28,29,26,27,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,0,1,10,11,20,21,30,31,24,25,18,19,28,29,26,27,16,17,26,27]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm14 = xmm12[0],xmm11[1],xmm12[2,3]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,u,u,u,u,u,u,6,7,0,1,10,11]
@@ -3336,7 +3336,7 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7,8],ymm2[9],ymm3[10],ymm2[11],ymm3[12,13],ymm2[14],ymm3[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm15[2,3,0,1]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4],ymm15[5],ymm14[6],ymm15[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,2,3,12,13,22,23,16,17,26,27,20,21,30,31,30,31,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,2,3,12,13,22,23,16,17,26,27,20,21,30,31,30,31,18,19,28,29]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2],ymm14[3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm13
 ; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm13, %ymm4
@@ -3394,145 +3394,141 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512F-FAST-LABEL: load_i16_stride5_vf32:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    vmovdqa 176(%rdi), %xmm0
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[4,5,14,15,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vmovdqa 160(%rdi), %xmm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[0,1,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX512F-FAST-NEXT:    vmovdqa 192(%rdi), %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa 224(%rdi), %ymm3
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5],ymm2[6],ymm3[7,8],ymm2[9],ymm3[10,11],ymm2[12],ymm3[13],ymm2[14],ymm3[15]
+; AVX512F-FAST-NEXT:    vmovdqa 176(%rdi), %xmm2
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm2[4,5,14,15,4,5,6,7,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vmovdqa 160(%rdi), %xmm3
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm3[0,1,10,11,8,9,10,11,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX512F-FAST-NEXT:    vmovdqa 192(%rdi), %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa 224(%rdi), %ymm5
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5],ymm4[6],ymm5[7,8],ymm4[9],ymm5[10,11],ymm4[12],ymm5[13],ymm4[14],ymm5[15]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <2,4,7,1,4,6,u,u>
-; AVX512F-FAST-NEXT:    vpermd %ymm5, %ymm6, %ymm5
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,8,9,14,15,0,1,6,7,16,17,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm6, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,8,9,14,15,0,1,6,7,16,17,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <8,9,3,2,4,u,u,u>
-; AVX512F-FAST-NEXT:    vpermi2d %ymm4, %ymm5, %ymm6
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,0,3,5,u>
-; AVX512F-FAST-NEXT:    vmovdqa 128(%rdi), %ymm4
-; AVX512F-FAST-NEXT:    vpermd %ymm4, %ymm5, %ymm5
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm5, %ymm5
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm7, %ymm20
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm5
+; AVX512F-FAST-NEXT:    vpermi2d %ymm0, %ymm1, %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa 256(%rdi), %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa 288(%rdi), %ymm1
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11],ymm0[12],ymm1[13,14],ymm0[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm8
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm8[3,4],xmm7[5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,4,5,14,15,8,9,2,3,12,13,6,7]
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm7[5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm10
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm11
 ; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %ymm8
 ; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %ymm9
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm8[0],ymm9[1,2],ymm8[3],ymm9[4],ymm8[5],ymm9[6,7],ymm8[8],ymm9[9,10],ymm8[11],ymm9[12],ymm8[13],ymm9[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <1,u,u,u,4,6,1,3>
-; AVX512F-FAST-NEXT:    vpermd %ymm6, %ymm7, %ymm6
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[2,3,16,17,22,23,24,25,30,31,20,21,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm10[0],ymm11[1],ymm10[2,3],ymm11[4],ymm10[5],ymm11[6],ymm10[7,8],ymm11[9],ymm10[10,11],ymm11[12],ymm10[13],ymm11[14],ymm10[15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm12
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0],xmm12[1,2,3],xmm7[4,5],xmm12[6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[0,1,10,11,4,5,14,15,8,9,2,3,12,13],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %ymm6, %ymm7, %ymm7
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm8[0],ymm9[1,2],ymm8[3],ymm9[4],ymm8[5],ymm9[6,7],ymm8[8],ymm9[9,10],ymm8[11],ymm9[12],ymm8[13],ymm9[14,15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <1,u,u,u,4,6,1,3>
+; AVX512F-FAST-NEXT:    vpermd %ymm7, %ymm12, %ymm7
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[2,3,16,17,22,23,24,25,30,31,20,21],zero,zero,zero,zero,zero,zero
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm10[0],ymm11[1],ymm10[2,3],ymm11[4],ymm10[5],ymm11[6],ymm10[7,8],ymm11[9],ymm10[10,11],ymm11[12],ymm10[13],ymm11[14],ymm10[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm13
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm13[1,2,3],xmm12[4,5],xmm13[6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[0,1,10,11,4,5,14,15,8,9,2,3,12,13],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpor %ymm7, %ymm12, %ymm12
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <0,3,1,u,0,3,5,u>
+; AVX512F-FAST-NEXT:    vmovdqa 128(%rdi), %ymm7
+; AVX512F-FAST-NEXT:    vpermd %ymm7, %ymm13, %ymm14
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm14, %ymm14
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm15, %zmm7
-; AVX512F-FAST-NEXT:    vmovdqa 256(%rdi), %ymm5
-; AVX512F-FAST-NEXT:    vmovdqa 288(%rdi), %ymm6
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm6[0,1],ymm5[2],ymm6[3],ymm5[4],ymm6[5,6],ymm5[7],ymm6[8,9],ymm5[10],ymm6[11],ymm5[12],ymm6[13,14],ymm5[15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm14
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1,2],xmm14[3,4],xmm12[5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,u,4,5,14,15,8,9,2,3,12,13,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [8,9,10,11,12,17,18,19]
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, %zmm14
-; AVX512F-FAST-NEXT:    vpermt2d %zmm12, %zmm16, %zmm14
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm7, %zmm17
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm12, %zmm15, %zmm14
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm14, %zmm16
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5],ymm8[6],ymm9[7,8],ymm8[9],ymm9[10,11],ymm8[12],ymm9[13],ymm8[14],ymm9[15]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <2,u,u,u,4,7,1,6>
 ; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm14, %ymm12
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[0,1,18,19,20,21,26,27,16,17,30,31,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[0,1,18,19,20,21,26,27,16,17,30,31],zero,zero,zero,zero,zero,zero
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm11[0],ymm10[1],ymm11[2],ymm10[3],ymm11[4,5],ymm10[6],ymm11[7,8],ymm10[9],ymm11[10],ymm10[11],ymm11[12,13],ymm10[14],ymm11[15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm7
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm14[0,1],xmm7[2,3],xmm14[4,5,6],xmm7[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[2,3,12,13,6,7,0,1,10,11,4,5,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %ymm7, %ymm12, %ymm7
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,1,3,6,u>
-; AVX512F-FAST-NEXT:    vpermd %ymm4, %ymm12, %ymm12
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
-; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm12, %ymm12
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm13, %ymm21
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm7, %zmm15, %zmm12
-; AVX512F-FAST-NEXT:    vpsrlq $48, %xmm0, %xmm7
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm1[2,3,12,13,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpunpckldq {{.*#+}} xmm7 = xmm13[0],xmm7[0],xmm13[1],xmm7[1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5],ymm3[6],ymm2[7,8],ymm3[9],ymm2[10,11],ymm3[12],ymm2[13],ymm3[14],ymm2[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = <0,2,5,7,4,7,u,u>
-; AVX512F-FAST-NEXT:    vpermd %ymm13, %ymm18, %ymm13
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[2,3,4,5,4,5,0,1,6,7,8,9,14,15,4,5,18,19,20,21,20,21,16,17,22,23,24,25,30,31,20,21]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm13[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm13[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm5[0],ymm6[1,2],ymm5[3],ymm6[4],ymm5[5],ymm6[6,7],ymm5[8],ymm6[9,10],ymm5[11],ymm6[12],ymm5[13],ymm6[14,15]
-; AVX512F-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm18 = [1,4,6,3,1,4,6,3]
-; AVX512F-FAST-NEXT:    # ymm18 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vpermd %ymm13, %ymm18, %ymm13
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,16,17,30,31,24,25]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm13[5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm12, %zmm19
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm11[0,1],ymm10[2],ymm11[3],ymm10[4],ymm11[5,6],ymm10[7],ymm11[8,9],ymm10[10],ymm11[11],ymm10[12],ymm11[13,14],ymm10[15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm13
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm13[3,4],xmm7[5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[4,5,14,15,8,9,2,3,12,13,6,7,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm8[0],ymm9[1],ymm8[2,3],ymm9[4],ymm8[5],ymm9[6],ymm8[7,8],ymm9[9],ymm8[10,11],ymm9[12],ymm8[13],ymm9[14],ymm8[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = <0,2,u,u,5,7,2,4>
-; AVX512F-FAST-NEXT:    vpermd %ymm13, %ymm18, %ymm13
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,0,1,6,7,16,17,22,23,24,25,30,31,20,21,22,23,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2],ymm13[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm1[u,u,u,u,u,u,u,u,4,5,14,15,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm13 = xmm13[2],xmm0[2],xmm13[3],xmm0[3]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7,8],ymm2[9],ymm3[10],ymm2[11],ymm3[12,13],ymm2[14],ymm3[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm6
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm14[0,1],xmm6[2,3],xmm14[4,5,6],xmm6[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[2,3,12,13,6,7,0,1,10,11,4,5,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpor %ymm6, %ymm12, %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <1,3,2,u,1,3,6,u>
+; AVX512F-FAST-NEXT:    vpermd %ymm7, %ymm12, %ymm12
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
+; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm12, %ymm12
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm14, %ymm19
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm15, %zmm12
+; AVX512F-FAST-NEXT:    vpsrlq $48, %xmm2, %xmm6
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm3[2,3,12,13,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm14[0],xmm6[0],xmm14[1],xmm6[1]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5],ymm5[6],ymm4[7,8],ymm5[9],ymm4[10,11],ymm5[12],ymm4[13],ymm5[14],ymm4[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = <0,2,5,7,4,7,u,u>
+; AVX512F-FAST-NEXT:    vpermd %ymm14, %ymm17, %ymm14
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[2,3,4,5,4,5,0,1,6,7,8,9,14,15,4,5,18,19,20,21,20,21,16,17,22,23,24,25,30,31,20,21]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm14[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4],ymm0[5],ymm1[6,7],ymm0[8],ymm1[9,10],ymm0[11],ymm1[12],ymm0[13],ymm1[14,15]
+; AVX512F-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm17 = [1,4,6,3,1,4,6,3]
+; AVX512F-FAST-NEXT:    # ymm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-FAST-NEXT:    vpermd %ymm14, %ymm17, %ymm14
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,16,17,30,31,24,25]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4],ymm14[5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm12, %zmm17
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm14
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm14[0,1,2],xmm6[3,4],xmm14[5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,8,9,2,3,12,13,6,7,0,1,10,11]
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm3[u,u,u,u,u,u,u,u,4,5,14,15,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm14 = xmm14[2],xmm2[2],xmm14[3],xmm2[3]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm5[0],ymm4[1],ymm5[2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7,8],ymm4[9],ymm5[10],ymm4[11],ymm5[12,13],ymm4[14],ymm5[15]
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = <0,3,5,2,5,7,u,u>
 ; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm18, %ymm12
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[0,1,6,7,2,3,2,3,4,5,10,11,0,1,14,15,16,17,22,23,18,19,18,19,20,21,26,27,16,17,30,31]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1,2],xmm12[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [1,4,6,0,1,4,6,0]
-; AVX512F-FAST-NEXT:    # ymm13 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermd %ymm4, %ymm13, %ymm13
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm20, %ymm14
-; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm13, %ymm13
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm12
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm7, %zmm15, %zmm12
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5],ymm5[6],ymm6[7,8],ymm5[9],ymm6[10,11],ymm5[12],ymm6[13],ymm5[14],ymm6[15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm13
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm13[0,1,2],xmm7[3,4],xmm13[5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,8,9,2,3,12,13,6,7,0,1,10,11]
-; AVX512F-FAST-NEXT:    vpermi2d %zmm7, %zmm12, %zmm16
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm12, %zmm13
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm10[0],ymm11[1,2],ymm10[3],ymm11[4],ymm10[5],ymm11[6,7],ymm10[8],ymm11[9,10],ymm10[11],ymm11[12],ymm10[13],ymm11[14,15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm12
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm7 = xmm12[0],xmm7[1],xmm12[2],xmm7[3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[6,7,0,1,10,11,4,5,14,15,8,9,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2],xmm12[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm14[0,1,2,3],ymm12[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm12[0,1,2,3,4],ymm6[5,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm11[0,1],ymm10[2],ymm11[3],ymm10[4],ymm11[5,6],ymm10[7],ymm11[8,9],ymm10[10],ymm11[11],ymm10[12],ymm11[13,14],ymm10[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm14
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1,2],xmm14[3,4],xmm12[5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[4,5,14,15,8,9,2,3,12,13,6,7,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm8[0],ymm9[1],ymm8[2,3],ymm9[4],ymm8[5],ymm9[6],ymm8[7,8],ymm9[9],ymm8[10,11],ymm9[12],ymm8[13],ymm9[14],ymm8[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = <0,2,u,u,5,7,2,4>
+; AVX512F-FAST-NEXT:    vpermd %ymm14, %ymm18, %ymm14
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,0,1,6,7,16,17,22,23,24,25,30,31,20,21,22,23,16,17,22,23]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2],ymm14[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [1,4,6,0,1,4,6,0]
+; AVX512F-FAST-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermd %ymm7, %ymm14, %ymm14
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm14, %ymm13
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm12, %zmm15, %zmm13
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm13, %zmm13
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm10[0],ymm11[1,2],ymm10[3],ymm11[4],ymm10[5],ymm11[6,7],ymm10[8],ymm11[9,10],ymm10[11],ymm11[12],ymm10[13],ymm11[14,15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm12
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm6 = xmm12[0],xmm6[1],xmm12[2],xmm6[3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[6,7,0,1,10,11,4,5,14,15,8,9,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm9[0],ymm8[1],ymm9[2],ymm8[3],ymm9[4,5],ymm8[6],ymm9[7,8],ymm8[9],ymm9[10],ymm8[11],ymm9[12,13],ymm8[14],ymm9[15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <0,3,u,u,5,0,2,7>
-; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm15, %ymm12
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,2,3,4,5,18,19,20,21,26,27,16,17,30,31,30,31,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2],ymm12[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm12 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <0,3,u,u,5,0,2,7>
+; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm14, %ymm12
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,2,3,4,5,18,19,20,21,26,27,16,17,30,31,30,31,18,19,20,21]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2],ymm12[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm12 = xmm2[0],xmm3[1],xmm2[2,3]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[6,7,0,1,10,11,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm15 = ymm3[0,1],ymm2[2],ymm3[3],ymm2[4],ymm3[5,6],ymm2[7],ymm3[8,9],ymm2[10],ymm3[11],ymm2[12],ymm3[13,14],ymm2[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <1,3,6,0,5,u,u,u>
-; AVX512F-FAST-NEXT:    vpermd %ymm15, %ymm16, %ymm15
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm15[2,3,2,3,4,5,0,1,6,7,8,9,14,15,4,5,18,19,18,19,20,21,16,17,22,23,24,25,30,31,20,21]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1,2],xmm15[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [2,4,7,0,2,4,7,0]
-; AVX512F-FAST-NEXT:    # ymm15 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermd %ymm4, %ymm15, %ymm15
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm21, %ymm14
-; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm15, %ymm14
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm5[0,1],ymm4[2],ymm5[3],ymm4[4],ymm5[5,6],ymm4[7],ymm5[8,9],ymm4[10],ymm5[11],ymm4[12],ymm5[13,14],ymm4[15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <1,3,6,0,5,u,u,u>
+; AVX512F-FAST-NEXT:    vpermd %ymm14, %ymm15, %ymm14
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[2,3,2,3,4,5,0,1,6,7,8,9,14,15,4,5,18,19,18,19,20,21,16,17,22,23,24,25,30,31,20,21]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1,2],xmm14[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [2,4,7,0,2,4,7,0]
+; AVX512F-FAST-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermd %ymm7, %ymm14, %ymm14
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm15
+; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm14, %ymm14
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm14, %zmm12
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm12
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm12, %ymm7
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm5[0],ymm6[1],ymm5[2,3],ymm6[4],ymm5[5],ymm6[6],ymm5[7,8],ymm6[9],ymm5[10,11],ymm6[12],ymm5[13],ymm6[14],ymm5[15]
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm12
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm12, %ymm6
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5],ymm1[6],ymm0[7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13],ymm1[14],ymm0[15]
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm15
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1,2,3],xmm14[4,5],xmm15[6,7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,0,1,10,11,4,5,14,15,8,9,2,3,12,13]
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm7[0],ymm14[1,2,3,4,5,6,7],ymm7[8],ymm14[9,10,11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm14[4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm12, %zmm7
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm6[0],ymm14[1,2,3,4,5,6,7],ymm6[8],ymm14[9,10,11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm12, %zmm6
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm11[0],ymm10[1],ymm11[2,3],ymm10[4],ymm11[5],ymm10[6],ymm11[7,8],ymm10[9],ymm11[10,11],ymm10[12],ymm11[13],ymm10[14],ymm11[15]
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm11
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0,1,2],xmm10[3,4],xmm11[5,6,7]
@@ -3542,34 +3538,34 @@ define void @load_i16_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpermd %ymm8, %ymm9, %ymm8
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,0,1,6,7,16,17,22,23,24,25,30,31,16,17,22,23,16,17,22,23]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm10[0,1,2],ymm8[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,2,3,12,13,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm3[1,2],ymm2[3],ymm3[4],ymm2[5],ymm3[6,7],ymm2[8],ymm3[9,10],ymm2[11],ymm3[12],ymm2[13],ymm3[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <1,4,6,3,6,u,u,u>
-; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm2, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,2,3,4,5,10,11,0,1,14,15,16,17,18,19,20,21,18,19,20,21,26,27,16,17,30,31]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,2,1,3,0,2,5,7]
-; AVX512F-FAST-NEXT:    vpermd %ymm4, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,6,7,4,5,6,7,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[8,9,2,3,12,13,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm5[1,2],ymm4[3],ymm5[4],ymm4[5],ymm5[6,7],ymm4[8],ymm5[9,10],ymm4[11],ymm5[12],ymm4[13],ymm5[14,15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <1,4,6,3,6,u,u,u>
+; AVX512F-FAST-NEXT:    vpermd %ymm3, %ymm4, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,2,3,4,5,10,11,0,1,14,15,16,17,18,19,20,21,18,19,20,21,26,27,16,17,30,31]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,2,1,3,0,2,5,7]
+; AVX512F-FAST-NEXT:    vpermd %ymm7, %ymm3, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,1,6,7,4,5,6,7,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
 ; AVX512F-FAST-NEXT:    movb $7, %al
 ; AVX512F-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm0 {%k1}
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm6[0],ymm5[1],ymm6[2],ymm5[3],ymm6[4,5],ymm5[6],ymm6[7,8],ymm5[9],ymm6[10],ymm5[11],ymm6[12,13],ymm5[14],ymm6[15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5,6],xmm3[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,2,3,12,13,6,7,0,1,10,11,4,5,14,15]
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1,2,3,4,5,6,7],ymm1[8],ymm2[9,10,11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm17, (%rsi)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm19, (%rdx)
+; AVX512F-FAST-NEXT:    vinserti64x4 $0, %ymm8, %zmm0, %zmm2 {%k1}
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6],xmm1[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,2,3,12,13,6,7,0,1,10,11,4,5,14,15]
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0],ymm0[1,2,3,4,5,6,7],ymm3[8],ymm0[9,10,11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm16, (%rsi)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm17, (%rdx)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm13, (%rcx)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, (%r8)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, (%r8)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, (%r9)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
@@ -5463,7 +5459,7 @@ define void @load_i16_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqa 288(%rdi), %xmm0
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm0[0],xmm4[1],xmm0[2,3]
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm0, %xmm15
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,u,u,u,u,2,3,12,13,6,7>
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = [6,7,2,3,4,5,6,7,6,7,2,3,12,13,6,7]
 ; AVX2-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm6
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
@@ -5510,7 +5506,7 @@ define void @load_i16_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm4, %xmm14
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm15[0,1],xmm4[2],xmm15[3]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,u,u,u,u,4,5,14,15,8,9>
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,2,3,4,5,6,7,8,9,4,5,14,15,8,9]
 ; AVX2-SLOW-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
@@ -5555,7 +5551,7 @@ define void @load_i16_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm0[3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm7 = xmm14[0],xmm13[1],xmm14[2,3]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,u,u,u,u,6,7,0,1,10,11>
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,6,7,4,5,6,7,8,9,6,7,0,1,10,11]
 ; AVX2-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm7
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm3[0,1,2,3,4],ymm7[5,6,7],ymm3[8,9,10,11,12],ymm7[13,14,15]
@@ -5636,7 +5632,7 @@ define void @load_i16_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
 ; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm5[0,1],xmm6[2],xmm5[3]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,u,u,u,u,u,8,9,2,3,12,13>
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,8,9,8,9,2,3,12,13]
 ; AVX2-SLOW-NEXT:    vpshufb %xmm3, %xmm4, %xmm4
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm1[0,1,2,3,4],ymm4[5,6,7],ymm1[8,9,10,11,12],ymm4[13,14,15]
@@ -5710,7 +5706,7 @@ define void @load_i16_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3,4],xmm3[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,u,u,u,u,4,5,14,15,24,25,18,19,28,29,22,23,u,u,u,u,u,u,u,u>
 ; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm0, %ymm0
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u>
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,9,2,3,12,13,6,7,0,1,10,11,0,1,6,7]
 ; AVX2-SLOW-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm5[3,1,2,3]
@@ -6143,7 +6139,7 @@ define void @load_i16_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3,4],xmm2[5,6,7]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm12[0,1],ymm11[2],ymm12[3],ymm11[4],ymm12[5,6],ymm11[7],ymm12[8,9],ymm11[10],ymm12[11],ymm11[12],ymm12[13,14],ymm11[15]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u>
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [8,9,2,3,12,13,6,7,0,1,10,11,0,1,6,7]
 ; AVX2-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <1,3,u,u,6,0,3,5>
 ; AVX2-FAST-NEXT:    vpermd %ymm2, %ymm7, %ymm2
@@ -6364,7 +6360,7 @@ define void @load_i16_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 304(%rdi), %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 288(%rdi), %xmm10
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm6 = xmm10[0],xmm1[1],xmm10[2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,u,u,u,u,2,3,12,13,6,7>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm0 = [6,7,2,3,4,5,6,7,6,7,2,3,12,13,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm0, %xmm6, %xmm6
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
@@ -6408,7 +6404,7 @@ define void @load_i16_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, %xmm4
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,u,u,u,u,4,5,14,15,8,9>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,2,3,4,5,6,7,8,9,4,5,14,15,8,9]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
@@ -6455,7 +6451,7 @@ define void @load_i16_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm7, %xmm13
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm0[3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm7 = xmm4[0],xmm10[1],xmm4[2,3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,u,u,u,u,6,7,0,1,10,11>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,6,7,4,5,6,7,8,9,6,7,0,1,10,11]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm0, %xmm7, %xmm7
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm7 = ymm3[0,1,2,3,4],ymm7[5,6,7],ymm3[8,9,10,11,12],ymm7[13,14,15]
@@ -6537,7 +6533,7 @@ define void @load_i16_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} xmm4 = xmm5[0,1],xmm6[2],xmm5[3]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,u,u,u,u,u,8,9,2,3,12,13>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,8,9,8,9,2,3,12,13]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm4, %xmm4
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm4 = ymm1[0,1,2,3,4],ymm4[5,6,7],ymm1[8,9,10,11,12],ymm4[13,14,15]
@@ -6610,7 +6606,7 @@ define void @load_i16_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3,4],xmm3[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,u,u,u,u,4,5,14,15,24,25,18,19,28,29,22,23,u,u,u,u,u,u,u,u>
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm9 = <8,9,2,3,12,13,6,7,0,1,10,11,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm9 = [8,9,2,3,12,13,6,7,0,1,10,11,0,1,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm11 = [12,13,14,15,4,5,14,15,8,9,10,11,12,13,14,15]
@@ -7180,435 +7176,453 @@ define void @load_i16_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512F-FAST-LABEL: load_i16_stride5_vf64:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    subq $712, %rsp # imm = 0x2C8
+; AVX512F-FAST-NEXT:    subq $520, %rsp # imm = 0x208
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [4,5,14,15,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vmovdqa 496(%rdi), %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm28
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,10,11,8,9,10,11,8,9,10,11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vmovdqa 480(%rdi), %xmm4
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm30
+; AVX512F-FAST-NEXT:    vmovdqa 480(%rdi), %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm3
 ; AVX512F-FAST-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; AVX512F-FAST-NEXT:    vmovdqa 512(%rdi), %ymm4
-; AVX512F-FAST-NEXT:    vmovdqa 544(%rdi), %ymm5
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5],ymm4[6],ymm5[7,8],ymm4[9],ymm5[10,11],ymm4[12],ymm5[13],ymm4[14],ymm5[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm5, %ymm29
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm4, %ymm22
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <2,4,7,1,4,6,u,u>
-; AVX512F-FAST-NEXT:    vpermd %ymm3, %ymm16, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,8,9,14,15,0,1,6,7,16,17,22,23,20,21,22,23,24,25,30,31,16,17,22,23>
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [8,9,3,2,4,5,7,6]
-; AVX512F-FAST-NEXT:    vpermt2d %ymm2, %ymm8, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [0,3,1,3,0,3,5,7]
-; AVX512F-FAST-NEXT:    vmovdqa 448(%rdi), %ymm4
-; AVX512F-FAST-NEXT:    vpermd %ymm4, %ymm15, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm4, %ymm20
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 352(%rdi), %ymm14
-; AVX512F-FAST-NEXT:    vmovdqa 320(%rdi), %ymm12
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm12[0],ymm14[1],ymm12[2,3],ymm14[4],ymm12[5],ymm14[6],ymm12[7,8],ymm14[9],ymm12[10,11],ymm14[12],ymm12[13],ymm14[14],ymm12[15]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2,3],xmm2[4,5],xmm3[6,7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <0,1,10,11,4,5,14,15,8,9,2,3,12,13,128,128,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa 384(%rdi), %ymm6
-; AVX512F-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 416(%rdi), %ymm3
-; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm6[0],ymm3[1,2],ymm6[3],ymm3[4],ymm6[5],ymm3[6,7],ymm6[8],ymm3[9,10],ymm6[11],ymm3[12],ymm6[13],ymm3[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [1,3,0,2,4,6,1,3]
-; AVX512F-FAST-NEXT:    vpermd %ymm3, %ymm13, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,30,31,20,21,128,128,128,128,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vpor %ymm3, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 176(%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa %xmm2, %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa 160(%rdi), %xmm11
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm11, %xmm1
-; AVX512F-FAST-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; AVX512F-FAST-NEXT:    vmovdqa 192(%rdi), %ymm10
-; AVX512F-FAST-NEXT:    vmovdqa 224(%rdi), %ymm9
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm9[0],ymm10[1],ymm9[2,3],ymm10[4],ymm9[5],ymm10[6],ymm9[7,8],ymm10[9],ymm9[10,11],ymm10[12],ymm9[13],ymm10[14],ymm9[15]
-; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm16, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpermt2d %ymm0, %ymm8, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 128(%rdi), %ymm21
-; AVX512F-FAST-NEXT:    vpermd %ymm21, %ymm15, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm7, %ymm18
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %ymm2
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm2[1,2],ymm1[3],ymm2[4],ymm1[5],ymm2[6,7],ymm1[8],ymm2[9,10],ymm1[11],ymm2[12],ymm1[13],ymm2[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm2, %ymm7
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa %ymm1, %ymm8
-; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm13, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa 512(%rdi), %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa 544(%rdi), %ymm9
+; AVX512F-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 576(%rdi), %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa 608(%rdi), %ymm11
+; AVX512F-FAST-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 352(%rdi), %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa 320(%rdi), %ymm3
+; AVX512F-FAST-NEXT:    vmovdqu %ymm3, (%rsp) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5],ymm4[6],ymm3[7,8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13],ymm4[14],ymm3[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm4, %ymm18
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm3[0],xmm4[1,2,3],xmm3[4,5],xmm4[6,7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,1,10,11,4,5,14,15,8,9,2,3,12,13,128,128,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa 384(%rdi), %ymm7
+; AVX512F-FAST-NEXT:    vmovdqa 416(%rdi), %ymm10
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm7[0],ymm10[1,2],ymm7[3],ymm10[4],ymm7[5],ymm10[6,7],ymm7[8],ymm10[9,10],ymm7[11],ymm10[12],ymm7[13],ymm10[14,15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm10, %ymm29
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm7, %ymm24
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [1,3,0,2,4,6,1,3]
+; AVX512F-FAST-NEXT:    vpermd %ymm5, %ymm7, %ymm5
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,30,31,20,21,128,128,128,128,128,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm5, %ymm5
+; AVX512F-FAST-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm11[0,1],ymm6[2],ymm11[3],ymm6[4],ymm11[5,6],ymm6[7],ymm11[8,9],ymm6[10],ymm11[11],ymm6[12],ymm11[13,14],ymm6[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm6, %ymm30
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3,4],xmm5[5,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5],ymm8[6],ymm9[7,8],ymm8[9],ymm9[10,11],ymm8[12],ymm9[13],ymm8[14],ymm9[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm8, %ymm28
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = <2,4,7,1,4,6,u,u>
+; AVX512F-FAST-NEXT:    vpermd %ymm6, %ymm17, %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,8,9,14,15,0,1,6,7,16,17,22,23,20,21,22,23,24,25,30,31,16,17,22,23>
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm6, %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [8,9,3,2,4,5,7,6]
+; AVX512F-FAST-NEXT:    vpermt2d %ymm2, %ymm19, %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [12,13,14,15,4,5,14,15,8,9,2,3,12,13,6,7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm5, %xmm5
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3,4],ymm5[5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [0,3,1,3,0,3,5,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 448(%rdi), %ymm27
+; AVX512F-FAST-NEXT:    vpermd %ymm27, %ymm21, %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm6, %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm25, %zmm6
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm4
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 176(%rdi), %xmm4
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa %xmm4, %xmm13
+; AVX512F-FAST-NEXT:    vmovdqa 160(%rdi), %xmm4
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm16
+; AVX512F-FAST-NEXT:    vpunpckldq {{.*#+}} xmm20 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %ymm15
+; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %ymm4
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm15[0],ymm4[1,2],ymm15[3],ymm4[4],ymm15[5],ymm4[6,7],ymm15[8],ymm4[9,10],ymm15[11],ymm4[12],ymm15[13],ymm4[14,15]
+; AVX512F-FAST-NEXT:    vmovdqa %ymm4, %ymm6
+; AVX512F-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm7, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm5
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm5[1],ymm2[2,3],ymm5[4],ymm2[5],ymm5[6],ymm2[7,8],ymm5[9],ymm2[10,11],ymm5[12],ymm2[13],ymm5[14],ymm2[15]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm5, %ymm6
-; AVX512F-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm5
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1,2,3],xmm1[4,5],xmm5[6,7]
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm0[0],ymm5[1],ymm0[2,3],ymm5[4],ymm0[5],ymm5[6],ymm0[7,8],ymm5[9],ymm0[10,11],ymm5[12],ymm0[13],ymm5[14],ymm0[15]
+; AVX512F-FAST-NEXT:    vmovdqa %ymm5, %ymm7
+; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm10
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2,3],xmm4[4,5],xmm5[6,7]
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm3
+; AVX512F-FAST-NEXT:    vpor %ymm1, %ymm3, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa 192(%rdi), %ymm14
+; AVX512F-FAST-NEXT:    vmovdqa 224(%rdi), %ymm12
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm12[0],ymm14[1],ymm12[2,3],ymm14[4],ymm12[5],ymm14[6],ymm12[7,8],ymm14[9],ymm12[10,11],ymm14[12],ymm12[13],ymm14[14],ymm12[15]
+; AVX512F-FAST-NEXT:    vpermd %ymm3, %ymm17, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm3, %ymm3
+; AVX512F-FAST-NEXT:    vmovdqa 256(%rdi), %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa 288(%rdi), %ymm5
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0,1],ymm4[2],ymm5[3],ymm4[4],ymm5[5,6],ymm4[7],ymm5[8,9],ymm4[10],ymm5[11],ymm4[12],ymm5[13,14],ymm4[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm8
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm8[3,4],xmm0[5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vpermt2d %ymm20, %ymm19, %ymm3
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 128(%rdi), %ymm20
+; AVX512F-FAST-NEXT:    vpermd %ymm20, %ymm21, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm2, %ymm2
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm25, %zmm2
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0],ymm2[1],ymm6[2],ymm2[3],ymm6[4,5],ymm2[6],ymm6[7,8],ymm2[9],ymm6[10],ymm2[11],ymm6[12,13],ymm2[14],ymm6[15]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm7[0],ymm10[1],ymm7[2],ymm10[3],ymm7[4,5],ymm10[6],ymm7[7,8],ymm10[9],ymm7[10],ymm10[11],ymm7[12,13],ymm10[14],ymm7[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm10, %ymm31
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm7, %ymm26
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6],xmm1[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = <2,u,u,u,4,7,1,6>
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm7[0],ymm8[1],ymm7[2,3],ymm8[4],ymm7[5],ymm8[6],ymm7[7,8],ymm8[9],ymm7[10,11],ymm8[12],ymm7[13],ymm8[14],ymm7[15]
-; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm24, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,18,19,20,21,26,27,16,17,30,31,128,128,128,128,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm5
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <2,3,12,13,6,7,0,1,10,11,4,5,14,15,128,128,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vporq %ymm5, %ymm0, %ymm16
-; AVX512F-FAST-NEXT:    vpsrlq $48, %xmm3, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,3,12,13,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm11, %xmm6
-; AVX512F-FAST-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = <0,2,5,7,4,7,u,u>
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm10[0],ymm9[1],ymm10[2,3],ymm9[4],ymm10[5],ymm9[6],ymm10[7,8],ymm9[9],ymm10[10,11],ymm9[12],ymm10[13],ymm9[14],ymm10[15]
-; AVX512F-FAST-NEXT:    vpermd %ymm6, %ymm23, %ymm15
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [2,3,4,5,4,5,0,1,6,7,8,9,14,15,4,5,18,19,20,21,20,21,16,17,22,23,24,25,30,31,20,21]
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm15, %ymm15
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm15[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm5[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm26 = [1,4,6,3,1,4,6,3]
-; AVX512F-FAST-NEXT:    # ymm26 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa 256(%rdi), %ymm0
-; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 288(%rdi), %ymm5
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm22 = <2,u,u,u,4,7,1,6>
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm6[0],ymm15[1],ymm6[2,3],ymm15[4],ymm6[5],ymm15[6],ymm6[7,8],ymm15[9],ymm6[10,11],ymm15[12],ymm6[13],ymm15[14],ymm6[15]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm22, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,18,19,20,21,26,27,16,17,30,31,128,128,128,128,128,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm3
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <2,3,12,13,6,7,0,1,10,11,4,5,14,15,128,128,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vporq %ymm3, %ymm0, %ymm19
+; AVX512F-FAST-NEXT:    vpsrlq $48, %xmm13, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm13, %xmm17
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <2,3,12,13,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm16, %xmm11
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm11, %xmm9
+; AVX512F-FAST-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = <0,2,5,7,4,7,u,u>
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm14[0],ymm12[1],ymm14[2,3],ymm12[4],ymm14[5],ymm12[6],ymm14[7,8],ymm12[9],ymm14[10,11],ymm12[12],ymm14[13],ymm12[14],ymm14[15]
+; AVX512F-FAST-NEXT:    vmovdqa %ymm12, %ymm7
+; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm21, %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [2,3,4,5,4,5,0,1,6,7,8,9,14,15,4,5,18,19,20,21,20,21,16,17,22,23,24,25,30,31,20,21]
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm8, %ymm8
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2],xmm8[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [1,4,6,3,1,4,6,3]
+; AVX512F-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm4[0],ymm5[1,2],ymm4[3],ymm5[4],ymm4[5],ymm5[6,7],ymm4[8],ymm5[9,10],ymm4[11],ymm5[12],ymm4[13],ymm5[14,15]
+; AVX512F-FAST-NEXT:    vmovdqa %ymm5, %ymm12
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm0[0],ymm5[1,2],ymm0[3],ymm5[4],ymm0[5],ymm5[6,7],ymm0[8],ymm5[9,10],ymm0[11],ymm5[12],ymm0[13],ymm5[14,15]
-; AVX512F-FAST-NEXT:    vpermd %ymm8, %ymm26, %ymm8
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,1,2,3,2,3,4,5,10,11,0,1,14,15,8,9,16,17,18,19,18,19,20,21,26,27,16,17,30,31,24,25]
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm8, %ymm8
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4],ymm8[5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [1,3,2,3,1,3,6,7]
-; AVX512F-FAST-NEXT:    vpermd %ymm21, %ymm27, %ymm8
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm8, %ymm8
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm16, %zmm25, %zmm8
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm5
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm14[0],ymm12[1],ymm14[2],ymm12[3],ymm14[4,5],ymm12[6],ymm14[7,8],ymm12[9],ymm14[10],ymm12[11],ymm14[12,13],ymm12[14],ymm14[15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm8
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm8[2,3],xmm7[4,5,6],xmm8[7]
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm7, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpblendw $173, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm7 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm7 = mem[0],ymm5[1],mem[2,3],ymm5[4],mem[5],ymm5[6],mem[7,8],ymm5[9],mem[10,11],ymm5[12],mem[13],ymm5[14],mem[15]
-; AVX512F-FAST-NEXT:    vpermd %ymm7, %ymm24, %ymm7
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm1
-; AVX512F-FAST-NEXT:    vpor %ymm1, %ymm4, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm29, %ymm13
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm22, %ymm8
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm8[0],ymm13[1],ymm8[2,3],ymm13[4],ymm8[5],ymm13[6],ymm8[7,8],ymm13[9],ymm8[10,11],ymm13[12],ymm8[13],ymm13[14],ymm8[15]
-; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm23, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm30, %xmm5
-; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
-; AVX512F-FAST-NEXT:    vpsrlq $48, %xmm28, %xmm6
-; AVX512F-FAST-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa 576(%rdi), %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa 608(%rdi), %ymm7
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm1[0],ymm7[1,2],ymm1[3],ymm7[4],ymm1[5],ymm7[6,7],ymm1[8],ymm7[9,10],ymm1[11],ymm7[12],ymm1[13],ymm7[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm7, %ymm19
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm17
-; AVX512F-FAST-NEXT:    vpermd %ymm6, %ymm26, %ymm6
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm6, %ymm0
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7]
-; AVX512F-FAST-NEXT:    vpermd %ymm20, %ymm27, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm25, %zmm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqa %ymm4, %ymm5
+; AVX512F-FAST-NEXT:    vpermd %ymm13, %ymm9, %ymm13
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,2,3,2,3,4,5,10,11,0,1,14,15,8,9,16,17,18,19,18,19,20,21,26,27,16,17,30,31,24,25]
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm13, %ymm13
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4],ymm13[5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [1,3,2,3,1,3,6,7]
+; AVX512F-FAST-NEXT:    vpermd %ymm20, %ymm16, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm19, %zmm25, %zmm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,u,u,4,5,14,15,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm11, %xmm0
-; AVX512F-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX512F-FAST-NEXT:    vmovdqa %xmm3, %xmm14
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,3,5,2,5,7,u,u>
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm9[0],ymm10[1],ymm9[2],ymm10[3],ymm9[4,5],ymm10[6],ymm9[7,8],ymm10[9],ymm9[10],ymm10[11],ymm9[12,13],ymm10[14],ymm9[15]
-; AVX512F-FAST-NEXT:    vpermd %ymm6, %ymm2, %ymm7
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,1,6,7,2,3,2,3,4,5,10,11,0,1,14,15,16,17,22,23,18,19,18,19,20,21,26,27,16,17,30,31]
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm7, %ymm7
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm7[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [1,4,6,0,1,4,6,0]
-; AVX512F-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermd %ymm21, %ymm7, %ymm12
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm18, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm12, %ymm12
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm12, %zmm29
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm22, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm13[0],ymm1[1],ymm13[2],ymm1[3],ymm13[4,5],ymm1[6],ymm13[7,8],ymm1[9],ymm13[10],ymm1[11],ymm13[12,13],ymm1[14],ymm13[15]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm2, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm5, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm28[2],xmm2[3],xmm28[3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm28, %xmm8
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm0[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpermd %ymm20, %ymm7, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqu64 %ymm20, (%rsp) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm26
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm14[0],xmm11[1],xmm14[2,3]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <6,7,0,1,10,11,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <1,3,6,0,5,u,u,u>
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm9[0,1],ymm10[2],ymm9[3],ymm10[4],ymm9[5,6],ymm10[7],ymm9[8,9],ymm10[10],ymm9[11],ymm10[12],ymm9[13,14],ymm10[15]
-; AVX512F-FAST-NEXT:    vpermd %ymm6, %ymm2, %ymm7
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [2,3,2,3,4,5,0,1,6,7,8,9,14,15,4,5,18,19,18,19,20,21,16,17,22,23,24,25,30,31,20,21]
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm7, %ymm7
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm7[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [2,4,7,0,2,4,7,0]
-; AVX512F-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermd %ymm21, %ymm7, %ymm12
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm12, %ymm12
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm12, %zmm27
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm8[0],xmm5[1],xmm8[2,3]
-; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm13[0,1],ymm1[2],ymm13[3],ymm1[4],ymm13[5,6],ymm1[7],ymm13[8,9],ymm1[10],ymm13[11],ymm1[12],ymm13[13,14],ymm1[15]
-; AVX512F-FAST-NEXT:    vpermd %ymm4, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpermd %ymm20, %ymm7, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm28
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm14[0,1],xmm11[2],xmm14[3]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm10[0],ymm9[1,2],ymm10[3],ymm9[4],ymm10[5],ymm9[6,7],ymm10[8],ymm9[9,10],ymm10[11],ymm9[12],ymm10[13],ymm9[14,15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,2,3,12,13,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <1,4,6,3,6,u,u,u>
-; AVX512F-FAST-NEXT:    vpermd %ymm2, %ymm1, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,2,3,4,5,10,11,0,1,14,15,16,17,18,19,20,21,18,19,20,21,26,27,16,17,30,31]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,2,1,3,0,2,5,7]
-; AVX512F-FAST-NEXT:    vpermd %ymm21, %ymm1, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[0,1,6,7,4,5,6,7,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm16
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm10
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm11
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm10[0,1],ymm11[2],ymm10[3],ymm11[4],ymm10[5,6],ymm11[7],ymm10[8,9],ymm11[10],ymm10[11],ymm11[12],ymm10[13,14],ymm11[15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3,4],xmm0[5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm19 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [12,13,14,15,4,5,14,15,8,9,2,3,12,13,6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [8,9,10,11,12,17,18,19]
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm21, %zmm19
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0,1],ymm8[2],ymm5[3],ymm8[4],ymm5[5,6],ymm8[7],ymm5[8,9],ymm8[10],ymm5[11],ymm8[12],ymm5[13,14],ymm8[15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm12
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm12[3,4],xmm0[5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm30 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm30, %zmm31
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm21, %zmm31
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm4[0,1],ymm2[2],ymm4[3],ymm2[4],ymm4[5,6],ymm2[7],ymm4[8,9],ymm2[10],ymm4[11],ymm2[12],ymm4[13,14],ymm2[15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm12
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm12[3,4],xmm0[5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = <0,2,u,u,5,7,2,4>
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm3[0],ymm15[1],ymm3[2,3],ymm15[4],ymm3[5],ymm15[6],ymm3[7,8],ymm15[9],ymm3[10,11],ymm15[12],ymm3[13],ymm15[14],ymm3[15]
-; AVX512F-FAST-NEXT:    vpermd %ymm13, %ymm18, %ymm13
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [0,1,6,7,8,9,14,15,4,5,6,7,0,1,6,7,16,17,22,23,24,25,30,31,20,21,22,23,16,17,22,23]
-; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm13, %ymm13
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,5,14,15,8,9,2,3,12,13,6,7,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm13[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm25, %zmm29
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0],ymm8[1],ymm5[2,3],ymm8[4],ymm5[5],ymm8[6],ymm5[7,8],ymm8[9],ymm5[10,11],ymm8[12],ymm5[13],ymm8[14],ymm5[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm5, %ymm17
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm13
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0,1,2],xmm0[3,4],xmm13[5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,2,3,8,9,2,3,12,13,6,7,0,1,10,11]
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm29, %zmm24
-; AVX512F-FAST-NEXT:    vpermt2d %zmm0, %zmm21, %zmm24
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm18, %ymm4
+; AVX512F-FAST-NEXT:    vpblendw $74, (%rsp), %ymm4, %ymm0 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm0 = ymm4[0],mem[1],ymm4[2],mem[3],ymm4[4,5],mem[6],ymm4[7,8],mem[9],ymm4[10],mem[11],ymm4[12,13],mem[14],ymm4[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm8
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm8[2,3],xmm0[4,5,6],xmm8[7]
+; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm24, %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm29, %ymm10
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm10[0],ymm8[1],ymm10[2,3],ymm8[4],ymm10[5],ymm8[6],ymm10[7,8],ymm8[9],ymm10[10,11],ymm8[12],ymm10[13],ymm8[14],ymm10[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm29, %ymm19
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm24, %ymm23
+; AVX512F-FAST-NEXT:    vpermd %ymm4, %ymm22, %ymm4
+; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm4, %ymm4
+; AVX512F-FAST-NEXT:    vpor %ymm4, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm28, %ymm6
+; AVX512F-FAST-NEXT:    vpblendw $82, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm4 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm4 = ymm6[0],mem[1],ymm6[2,3],mem[4],ymm6[5],mem[6],ymm6[7,8],mem[9],ymm6[10,11],mem[12],ymm6[13],mem[14],ymm6[15]
+; AVX512F-FAST-NEXT:    vpermd %ymm4, %ymm21, %ymm4
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm10, %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm29 # 16-byte Reload
+; AVX512F-FAST-NEXT:    vpsrlq $48, %xmm29, %xmm4
+; AVX512F-FAST-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm2[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0,1],ymm6[2],ymm5[3],ymm6[4],ymm5[5,6],ymm6[7],ymm5[8,9],ymm6[10],ymm5[11],ymm6[12],ymm5[13,14],ymm6[15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm7
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm7[3,4],xmm0[5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm14[0],ymm9[1],ymm14[2,3],ymm9[4],ymm14[5],ymm9[6],ymm14[7,8],ymm9[9],ymm14[10,11],ymm9[12],ymm14[13],ymm9[14],ymm14[15]
-; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm18, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm25, %zmm26
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm10[0],ymm11[1],ymm10[2,3],ymm11[4],ymm10[5],ymm11[6],ymm10[7,8],ymm11[9],ymm10[10,11],ymm11[12],ymm10[13],ymm11[14],ymm10[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm11, %ymm18
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm10, %ymm25
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm30, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqu64 %ymm30, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm6[1,2],ymm4[3],ymm6[4],ymm4[5],ymm6[6,7],ymm4[8],ymm6[9,10],ymm4[11],ymm6[12],ymm4[13],ymm6[14,15]
+; AVX512F-FAST-NEXT:    vpermd %ymm3, %ymm9, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
+; AVX512F-FAST-NEXT:    vpermd %ymm27, %ymm16, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm2, %ymm2
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm25, %zmm2
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm12[0],ymm5[1],ymm12[2,3],ymm5[4],ymm12[5],ymm5[6],ymm12[7,8],ymm5[9],ymm12[10,11],ymm5[12],ymm12[13],ymm5[14],ymm12[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm5, %ymm22
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpermi2d %zmm0, %zmm26, %zmm21
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm4[1,2],ymm2[3],ymm4[4],ymm2[5],ymm4[6,7],ymm2[8],ymm4[9,10],ymm2[11],ymm4[12],ymm2[13],ymm4[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm4, %ymm20
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm22
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,3,u,u,5,0,2,7>
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm15[0],ymm3[1],ymm15[2],ymm3[3],ymm15[4,5],ymm3[6],ymm15[7,8],ymm3[9],ymm15[10],ymm3[11],ymm15[12,13],ymm3[14],ymm15[15]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm3, %ymm2
-; AVX512F-FAST-NEXT:    vpermd %ymm7, %ymm1, %ymm7
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [2,3,4,5,10,11,0,1,14,15,14,15,2,3,4,5,18,19,20,21,26,27,16,17,30,31,30,31,18,19,20,21]
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm7, %ymm7
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [6,7,0,1,10,11,4,5,14,15,8,9,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm7[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm7, %zmm27
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm10
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0],ymm10[1],ymm8[2,3],ymm10[4],ymm8[5],ymm10[6],ymm8[7,8],ymm10[9],ymm8[10,11],ymm10[12],ymm8[13],ymm10[14],ymm8[15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm13
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm13[1,2,3],xmm0[4,5],xmm13[6,7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,0,1,10,11,4,5,14,15,8,9,2,3,12,13]
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,u,u,4,5,14,15,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm11, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm11, %xmm30
+; AVX512F-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm17[2],xmm1[3],xmm17[3]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = <0,3,5,2,5,7,u,u>
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm7[0],ymm14[1],ymm7[2],ymm14[3],ymm7[4,5],ymm14[6],ymm7[7,8],ymm14[9],ymm7[10],ymm14[11],ymm7[12,13],ymm14[14],ymm7[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm7, %ymm16
+; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm24, %ymm5
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,6,7,2,3,2,3,4,5,10,11,0,1,14,15,16,17,22,23,18,19,18,19,20,21,26,27,16,17,30,31]
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm5
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm5[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,2,3,8,9,2,3,12,13,6,7,0,1,10,11]
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm27, %ymm3
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0],ymm0[1,2,3,4,5,6,7],ymm3[8],ymm0[9,10,11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm27, %zmm23
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm6[0],ymm5[1,2],ymm6[3],ymm5[4],ymm6[5],ymm5[6,7],ymm6[8],ymm5[9,10],ymm6[11],ymm5[12],ymm6[13],ymm5[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm5, %ymm17
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm6, %ymm27
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm6
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm6[0],xmm3[1],xmm6[2],xmm3[3]
-; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm9[0],ymm14[1],ymm9[2],ymm14[3],ymm9[4,5],ymm14[6],ymm9[7,8],ymm14[9],ymm9[10],ymm14[11],ymm9[12,13],ymm14[14],ymm9[15]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm9, %ymm5
-; AVX512F-FAST-NEXT:    vpermd %ymm6, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm7, %zmm28
-; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vpblendd $11, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
-; AVX512F-FAST-NEXT:    # xmm1 = mem[0,1],xmm0[2],mem[3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[8,9,2,3,12,13,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpblendw $214, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm3 = ymm0[0],mem[1,2],ymm0[3],mem[4],ymm0[5],mem[6,7],ymm0[8],mem[9,10],ymm0[11],mem[12],ymm0[13],mem[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <1,4,6,3,6,u,u,u>
-; AVX512F-FAST-NEXT:    vpermd %ymm3, %ymm0, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,2,3,4,5,10,11,0,1,14,15,16,17,18,19,20,21,18,19,20,21,26,27,16,17,30,31]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,2,1,3,0,2,5,7]
-; AVX512F-FAST-NEXT:    vpermd (%rsp), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,1,6,7,4,5,6,7,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm18, %ymm9
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm25, %ymm0
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm9[0],ymm0[1],ymm9[2,3],ymm0[4],ymm9[5],ymm0[6],ymm9[7,8],ymm0[9],ymm9[10,11],ymm0[12],ymm9[13],ymm0[14],ymm9[15]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm0[5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm26, %ymm21
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm26, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm31, %ymm8
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm8[2],ymm0[3],ymm8[4],ymm0[5,6],ymm8[7],ymm0[8,9],ymm8[10],ymm0[11],ymm8[12],ymm0[13,14],ymm8[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm8
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm0[0,1,2],xmm8[3,4],xmm0[5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm26 = <0,2,u,u,5,7,2,4>
+; AVX512F-FAST-NEXT:    vpblendw $82, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm0 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm0 = ymm15[0],mem[1],ymm15[2,3],mem[4],ymm15[5],mem[6],ymm15[7,8],mem[9],ymm15[10,11],mem[12],ymm15[13],mem[14],ymm15[15]
+; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm26, %ymm13
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,1,6,7,8,9,14,15,4,5,6,7,0,1,6,7,16,17,22,23,24,25,30,31,20,21,22,23,16,17,22,23]
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm13, %ymm13
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [4,5,14,15,8,9,2,3,12,13,6,7,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm8, %xmm8
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2],ymm13[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [1,4,6,0,1,4,6,0]
+; AVX512F-FAST-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermd %ymm20, %ymm13, %ymm15
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
+; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm15, %ymm15
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm8, %zmm25, %zmm15
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm15, %zmm3
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5],ymm4[6],ymm6[7,8],ymm4[9],ymm6[10,11],ymm4[12],ymm6[13],ymm4[14],ymm6[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm8
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm8[0,1,2],xmm3[3,4],xmm8[5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm28, %ymm6
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm8[0],ymm6[1],ymm8[2],ymm6[3],ymm8[4,5],ymm6[6],ymm8[7,8],ymm6[9],ymm8[10],ymm6[11],ymm8[12,13],ymm6[14],ymm8[15]
+; AVX512F-FAST-NEXT:    vpermd %ymm5, %ymm24, %ymm5
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm10, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa %xmm10, %xmm11
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm29, %xmm10
+; AVX512F-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm29[2],xmm2[3],xmm29[3]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm2
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqu (%rsp), %ymm15 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm18, %ymm3
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1],ymm15[2],ymm3[3],ymm15[4],ymm3[5,6],ymm15[7],ymm3[8,9],ymm15[10],ymm3[11],ymm15[12],ymm3[13,14],ymm15[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm18, %ymm29
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3,4],xmm2[5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm23, %ymm5
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5],ymm4[6],ymm5[7,8],ymm4[9],ymm5[10,11],ymm4[12],ymm5[13],ymm4[14],ymm5[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm28
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm23, %ymm19
+; AVX512F-FAST-NEXT:    vpermd %ymm3, %ymm26, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermd %ymm27, %ymm13, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm2, %ymm2
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm25, %zmm2
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm30, %xmm7
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm17, %xmm13
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm13[0],xmm7[1],xmm13[2,3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <6,7,0,1,10,11,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = <1,3,6,0,5,u,u,u>
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm16, %ymm9
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm9[0,1],ymm14[2],ymm9[3],ymm14[4],ymm9[5,6],ymm14[7],ymm9[8,9],ymm14[10],ymm9[11],ymm14[12],ymm9[13,14],ymm14[15]
+; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm17, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [2,3,2,3,4,5,0,1,6,7,8,9,14,15,4,5,18,19,18,19,20,21,16,17,22,23,24,25,30,31,20,21]
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm4
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [2,4,7,0,2,4,7,0]
+; AVX512F-FAST-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermd %ymm20, %ymm4, %ymm5
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm5
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm24
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm1 = xmm10[0],xmm11[1],xmm10[2,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm11, %xmm16
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm10, %xmm30
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm8[0,1],ymm6[2],ymm8[3],ymm6[4],ymm8[5,6],ymm6[7],ymm8[8,9],ymm6[10],ymm8[11],ymm6[12],ymm8[13,14],ymm6[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm8, %ymm18
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm6, %ymm23
+; AVX512F-FAST-NEXT:    vpermd %ymm2, %ymm17, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermd %ymm27, %ymm4, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm25
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm1 = xmm13[0,1],xmm7[2],xmm13[3]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm14[0],ymm9[1,2],ymm14[3],ymm9[4],ymm14[5],ymm9[6,7],ymm14[8],ymm9[9,10],ymm14[11],ymm9[12],ymm14[13],ymm9[14,15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <8,9,2,3,12,13,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <1,4,6,3,6,u,u,u>
+; AVX512F-FAST-NEXT:    vpermd %ymm2, %ymm4, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,2,3,4,5,10,11,0,1,14,15,16,17,18,19,20,21,18,19,20,21,26,27,16,17,30,31]
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm2[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [0,2,1,3,0,2,5,7]
+; AVX512F-FAST-NEXT:    vpermd %ymm20, %ymm26, %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,6,7,4,5,6,7,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm6
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm6, %zmm6
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm31, %ymm5
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm21, %ymm7
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0],ymm7[1,2],ymm5[3],ymm7[4],ymm5[5],ymm7[6,7],ymm5[8],ymm7[9,10],ymm5[11],ymm7[12],ymm5[13],ymm7[14,15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm31, %ymm17
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm7
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm3 = xmm7[0],xmm3[1],xmm7[2],xmm3[3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <0,3,u,u,5,0,2,7>
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm5[0],ymm10[1],ymm5[2],ymm10[3],ymm5[4,5],ymm10[6],ymm5[7,8],ymm10[9],ymm5[10],ymm10[11],ymm5[12,13],ymm10[14],ymm5[15]
+; AVX512F-FAST-NEXT:    vpermd %ymm8, %ymm7, %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [2,3,4,5,10,11,0,1,14,15,14,15,2,3,4,5,18,19,20,21,26,27,16,17,30,31,30,31,18,19,20,21]
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm8, %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [6,7,0,1,10,11,4,5,14,15,8,9,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm3
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm8[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm8, %zmm24
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm22, %ymm3
+; AVX512F-FAST-NEXT:    vpblendw $82, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm3 = ymm3[0],mem[1],ymm3[2,3],mem[4],ymm3[5],mem[6],ymm3[7,8],mem[9],ymm3[10,11],mem[12],ymm3[13],mem[14],ymm3[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm13
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm3[0],xmm13[1,2,3],xmm3[4,5],xmm13[6,7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,0,1,10,11,4,5,14,15,8,9,2,3,12,13]
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm13, %xmm13
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm31
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm24, %ymm14
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm14[0],ymm13[1,2,3,4,5,6,7],ymm14[8],ymm13[9,10,11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm24, %zmm20
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm29, %ymm12
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm15[0],ymm12[1,2],ymm15[3],ymm12[4],ymm15[5],ymm12[6,7],ymm15[8],ymm12[9,10],ymm15[11],ymm12[12],ymm15[13],ymm12[14,15]
+; AVX512F-FAST-NEXT:    vmovdqa %ymm15, %ymm13
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm15
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm14 = xmm15[0],xmm14[1],xmm15[2],xmm14[3]
+; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm14, %xmm11
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm28, %ymm15
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm3
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm15[0],ymm3[1],ymm15[2],ymm3[3],ymm15[4,5],ymm3[6],ymm15[7,8],ymm3[9],ymm15[10],ymm3[11],ymm15[12,13],ymm3[14],ymm15[15]
+; AVX512F-FAST-NEXT:    vpermd %ymm14, %ymm7, %ymm7
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm7, %ymm7
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0,1,2],ymm7[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm7, %zmm8, %zmm25
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm16, %xmm7
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm30, %xmm8
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} xmm7 = xmm8[0,1],xmm7[2],xmm8[3]
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm7, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm23, %ymm7
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm18, %ymm8
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0],ymm8[1,2],ymm7[3],ymm8[4],ymm7[5],ymm8[6,7],ymm7[8],ymm8[9,10],ymm7[11],ymm8[12],ymm7[13],ymm8[14,15]
+; AVX512F-FAST-NEXT:    vpermd %ymm7, %ymm4, %ymm4
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm1
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermd %ymm27, %ymm26, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm9[0],ymm11[1],ymm9[2,3],ymm11[4],ymm9[5],ymm11[6],ymm9[7,8],ymm11[9],ymm9[10,11],ymm11[12],ymm9[13],ymm11[14],ymm9[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4,5],xmm2[6,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm31, %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm25, %ymm2
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4,5,6,7],ymm2[8],ymm1[9,10,11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm25, %zmm1
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm10[2],ymm5[3],ymm10[4],ymm5[5,6],ymm10[7],ymm5[8,9],ymm10[10],ymm5[11],ymm10[12],ymm5[13,14],ymm10[15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <1,3,u,u,6,0,3,5>
+; AVX512F-FAST-NEXT:    vpermd %ymm2, %ymm14, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,6,7,8,9,14,15,0,1,6,7,0,1,6,7,16,17,22,23,24,25,30,31,16,17,22,23,16,17,22,23]
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm2, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm5
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm21, %ymm7
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm7[0],ymm5[1],ymm7[2,3],ymm5[4],ymm7[5],ymm5[6],ymm7[7,8],ymm5[9],ymm7[10,11],ymm5[12],ymm7[13],ymm5[14],ymm7[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm7
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1,2],xmm5[3,4],xmm7[5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,9,2,3,12,13,6,7,0,1,10,11,0,1,6,7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm5
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    movb $7, %al
+; AVX512F-FAST-NEXT:    kmovw %eax, %k1
+; AVX512F-FAST-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm6 {%k1}
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm6, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm22, %ymm5
+; AVX512F-FAST-NEXT:    vpblendw $181, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm5 = mem[0],ymm5[1],mem[2],ymm5[3],mem[4,5],ymm5[6],mem[7,8],ymm5[9],mem[10],ymm5[11],mem[12,13],ymm5[14],mem[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm8
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm8[2,3],xmm5[4,5,6],xmm8[7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [6,7,2,3,12,13,6,7,0,1,10,11,4,5,14,15]
+; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm5, %xmm5
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm2[0],ymm5[1,2,3,4,5,6,7],ymm2[8],ymm5[9,10,11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm6, %zmm2
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm12[0],ymm13[1],ymm12[2,3],ymm13[4],ymm12[5],ymm13[6],ymm12[7,8],ymm13[9],ymm12[10,11],ymm13[12],ymm12[13],ymm13[14],ymm12[15]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3,4],xmm6[5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm5
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm15[0,1],ymm3[2],ymm15[3],ymm3[4],ymm15[5,6],ymm3[7],ymm15[8,9],ymm3[10],ymm15[11],ymm3[12],ymm15[13,14],ymm3[15]
+; AVX512F-FAST-NEXT:    vpermd %ymm6, %ymm14, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2],ymm3[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $0, %ymm3, %zmm0, %zmm0 {%k1}
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm11[0],ymm9[1],ymm11[2],ymm9[3],ymm11[4,5],ymm9[6],ymm11[7,8],ymm9[9],ymm11[10],ymm9[11],ymm11[12,13],ymm9[14],ymm11[15]
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3],xmm3[4,5],xmm4[6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm28, %ymm4
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5,6],xmm4[7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm3
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm0, %ymm4
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1,2,3,4,5,6,7],ymm4[8],ymm3[9,10,11,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm28, %zmm3
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm15[0,1],ymm2[2],ymm15[3],ymm2[4],ymm15[5,6],ymm2[7],ymm15[8,9],ymm2[10],ymm15[11],ymm2[12],ymm15[13,14],ymm2[15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <1,3,u,u,6,0,3,5>
-; AVX512F-FAST-NEXT:    vpermd %ymm4, %ymm6, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [0,1,6,7,8,9,14,15,0,1,6,7,0,1,6,7,16,17,22,23,24,25,30,31,16,17,22,23,16,17,22,23]
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm4, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm20, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm22, %ymm11
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm2[0],ymm11[1],ymm2[2,3],ymm11[4],ymm2[5],ymm11[6],ymm2[7,8],ymm11[9],ymm2[10,11],ymm11[12],ymm2[13],ymm11[14],ymm2[15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm12
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm12[0,1,2],xmm11[3,4],xmm12[5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [8,9,2,3,12,13,6,7,0,1,10,11,0,1,6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm11[0,1,2],ymm4[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    movb $7, %al
-; AVX512F-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-FAST-NEXT:    vinserti64x4 $0, %ymm4, %zmm0, %zmm16 {%k1}
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm16, %ymm4
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm10[0],ymm8[1],ymm10[2],ymm8[3],ymm10[4,5],ymm8[6],ymm10[7,8],ymm8[9],ymm10[10],ymm8[11],ymm10[12,13],ymm8[14],ymm10[15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm13
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1],xmm13[2,3],xmm11[4,5,6],xmm13[7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [6,7,2,3,12,13,6,7,0,1,10,11,4,5,14,15]
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm4[0],ymm11[1,2,3,4,5,6,7],ymm4[8],ymm11[9,10,11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm16, %zmm4
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm27, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm8
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm8[0],ymm2[1],ymm8[2,3],ymm2[4],ymm8[5],ymm2[6],ymm8[7,8],ymm2[9],ymm8[10,11],ymm2[12],ymm8[13],ymm2[14],ymm8[15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm11
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0,1,2],xmm10[3,4],xmm11[5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm5[0,1],ymm14[2],ymm5[3],ymm14[4],ymm5[5,6],ymm14[7],ymm5[8,9],ymm14[10],ymm5[11],ymm14[12],ymm5[13,14],ymm14[15]
-; AVX512F-FAST-NEXT:    vpermd %ymm11, %ymm6, %ymm6
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm6, %ymm6
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1,2],ymm6[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $0, %ymm6, %zmm0, %zmm1 {%k1}
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm0[0],ymm9[1],ymm0[2],ymm9[3],ymm0[4,5],ymm9[6],ymm0[7,8],ymm9[9],ymm0[10],ymm9[11],ymm0[12,13],ymm9[14],ymm0[15]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5,6],xmm6[7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm5, %xmm5
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm1, %ymm6
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1,2,3,4,5,6,7],ymm6[8],ymm5[9,10,11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm0, %zmm5
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm31, %zmm30, %zmm6
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm29, %zmm7
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm26, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, (%rsi)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, 64(%rsi)
-; AVX512F-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovaps %zmm0, 64(%rdx)
-; AVX512F-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vmovaps %zmm0, (%rdx)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rcx)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, (%rcx)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, 64(%r8)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm23, (%r8)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 64(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, (%r9)
-; AVX512F-FAST-NEXT:    addq $712, %rsp # imm = 0x2C8
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovaps %zmm3, (%rsi)
+; AVX512F-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovaps %zmm3, 64(%rsi)
+; AVX512F-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovaps %zmm3, 64(%rdx)
+; AVX512F-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovaps %zmm3, (%rdx)
+; AVX512F-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovaps %zmm3, 64(%rcx)
+; AVX512F-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vmovaps %zmm3, (%rcx)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 64(%r8)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm20, (%r8)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, 64(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, (%r9)
+; AVX512F-FAST-NEXT:    addq $520, %rsp # imm = 0x208
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
index 98ac424b382ac..59ff5b4f8f45b 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
@@ -1595,13 +1595,13 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm9[u,u,u,u,u,u,u,u,0,1,12,13,u,u,4,5]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm9[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm8[2,2,2,2,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,2]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3],xmm6[4,5],xmm7[6],xmm6[7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm10
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm12
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm12[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,6,7]
@@ -1614,24 +1614,26 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm2[0,1,2],ymm10[3,4,5,6,7],ymm2[8,9,10],ymm10[11,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm10[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,2,3,14,15,u,u,6,7]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3],xmm9[4,5],xmm8[6],xmm9[7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm12[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,u,u,u,u]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1],xmm9[2],xmm10[3],xmm9[4,5],xmm10[6,7]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2],ymm5[3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm5[0,1,2],ymm8[3,4,5,6,7],ymm5[8,9,10],ymm8[11,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm9[u,u,u,u,u,u,8,9,u,u,0,1,12,13,u,u]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm10
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm10[0,1,2,3,6,5,6,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm11[4],xmm8[5,6],xmm11[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm9[0,1,2,3,6,5,6,4]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm8[2,1,0,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm11[0,0,0,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm12[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm13[2,1,2,0,4,5,6,7]
@@ -1640,24 +1642,24 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm12[0,0,2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,1,3,3]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1,2],xmm14[3],xmm15[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = ymm11[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = ymm10[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm15[0,1,2],ymm8[3,4,5,6,7],ymm15[8,9,10],ymm8[11,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,6,5,4]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4],xmm15[5,6,7]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,7,5,6,5]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,2,0,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm10[4],xmm9[5,6],xmm10[7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm13[3,1,2,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[1,1,1,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[0,1,3,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm12[1,2],xmm10[3],xmm12[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1,2],xmm11[3],xmm12[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm11[0,1,2],ymm9[3,4,5,6,7],ymm11[8,9,10],ymm9[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,3,2]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm11[5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7],ymm10[8,9,10],ymm9[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,3,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3,4],xmm10[5,6,7]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
 ; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
@@ -1710,11 +1712,11 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm8[u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm9
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u,u,u,u,0,1,12,13,u,u,4,5]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm9[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm10
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm7
 ; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm7[2,1,0,3]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm12[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
@@ -1727,10 +1729,10 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm2[0,1,2],ymm10[3,4,5,6,7],ymm2[8,9,10],ymm10[11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm10[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,2,3,14,15,u,u,6,7]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3],xmm9[4,5],xmm8[6],xmm9[7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm12[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm10[2],xmm9[3],xmm10[4,5],xmm9[6,7]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
@@ -1742,7 +1744,7 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
 ; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm9[0,1,2,3,6,5,6,4]
 ; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm8[2,1,0,3]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm11[u,u,u,u,u,u,0,1,u,u,8,9,12,13,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm11[0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
@@ -1759,7 +1761,7 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4],xmm15[5,6,7]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
 ; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,2,3,u,u,10,11,14,15,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
 ; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
@@ -1788,12 +1790,12 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpblendvb %ymm11, %ymm1, %ymm4, %ymm1
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
 ; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[u,u,u,u,0,1,4,5,u,u,12,13,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4],xmm6[5],xmm4[6,7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm7[0,1,2,3,4],ymm4[5,6,7]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,2,3,6,7,u,u,14,15,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4],xmm0[5],xmm3[6,7]
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
@@ -1818,11 +1820,11 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm6 = xmm8[u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm8, %xmm9
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u,u,u,u,0,1,12,13,u,u,4,5]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm7 = xmm9[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm10
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm6 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm6 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm11, %xmm7
 ; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm12 = xmm7[2,1,0,3]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm7 = xmm12[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
@@ -1835,10 +1837,10 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm10 = ymm2[0,1,2],ymm10[3,4,5,6,7],ymm2[8,9,10],ymm10[11,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm10[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,5,5,5]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,2,3,14,15,u,u,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0,1,2],xmm8[3],xmm9[4,5],xmm8[6],xmm9[7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm9 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm9 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm10 = xmm12[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm10[2],xmm9[3],xmm10[4,5],xmm9[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
@@ -1850,7 +1852,7 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm9[0,1,2,3,6,5,6,4]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm11 = xmm8[2,1,0,3]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm8 = xmm11[u,u,u,u,u,u,0,1,u,u,8,9,12,13,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm8 = xmm11[0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
@@ -1867,7 +1869,7 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4],xmm15[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,2,3,u,u,10,11,14,15,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
@@ -1896,12 +1898,12 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm11, %ymm1, %ymm4, %ymm1
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[u,u,u,u,0,1,4,5,u,u,12,13,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4],xmm6[5],xmm4[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm7[0,1,2,3,4],ymm4[5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,2,3,6,7,u,u,14,15,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4],xmm0[5],xmm3[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7]
@@ -1924,7 +1926,7 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm2
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
 ; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm8[u,u,u,u,u,u,u,u,0,1,12,13,u,u,4,5]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm8[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm5[2,2,2,2,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,2,2]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm7[3],xmm6[4,5],xmm7[6],xmm6[7]
@@ -1934,7 +1936,7 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm13
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm13[0,2,0,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,6,6,7]
@@ -1943,24 +1945,26 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm1[0,1,2],ymm9[3,4,5,6,7],ymm1[8,9,10],ymm9[11,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,2,3,14,15,u,u,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3],xmm8[4,5],xmm5[6],xmm8[7]
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm13[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1],xmm9[2],xmm10[3],xmm9[4,5],xmm10[6,7]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1,2],ymm8[3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm8[0,1,2],ymm5[3,4,5,6,7],ymm8[8,9,10],ymm5[11,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm9[u,u,u,u,u,u,8,9,u,u,0,1,12,13,u,u]
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm10
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,2,1]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm10[0,1,2,3,6,5,6,4]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm11[4],xmm8[5,6],xmm11[7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm9[0,1,2,3,6,5,6,4]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm8[2,1,0,3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm11[0,0,0,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm12[2,1,2,3]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm13[2,1,2,0,4,5,6,7]
@@ -1969,29 +1973,29 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm12[0,0,2,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,1,3,3]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1,2],xmm14[3],xmm15[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = ymm11[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = ymm10[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm15[0,1,2],ymm8[3,4,5,6,7],ymm15[8,9,10],ymm8[11,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4],xmm15[5,6,7]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,7,5,6,5]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,2,0,3]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,7,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm10[4],xmm9[5,6],xmm10[7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm13[3,1,2,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[1,1,1,1,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,7,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[0,1,3,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm12[1,2],xmm10[3],xmm12[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1,2],xmm11[3],xmm12[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm11[0,1,2],ymm9[3,4,5,6,7],ymm11[8,9,10],ymm9[11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm11[5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3,4,5,6,7],ymm10[8,9,10],ymm9[11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3,4],xmm10[5,6,7]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
 ; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm4[2,2,2,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
 ; AVX512F-SLOW-NEXT:    vpternlogq $236, %ymm11, %ymm7, %ymm10
@@ -2004,7 +2008,7 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm12[0,1,2,3],xmm7[4],xmm12[5],xmm7[6,7]
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm10[0,1,2,3,4],ymm7[5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
@@ -2031,44 +2035,44 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vmovdqa 160(%rdi), %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm3
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm4
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa 128(%rdi), %ymm2
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa 128(%rdi), %ymm1
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u]
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm8
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm8[u,u,u,u,u,u,u,u,0,1,12,13,u,u,4,5]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm8[0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm9
-; AVX512F-FAST-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm1[2,3],mem[2,3]
-; AVX512F-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm1, %ymm7
+; AVX512F-FAST-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm2[2,3],mem[2,3]
+; AVX512F-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm2, %ymm7
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm7[0],ymm6[1],ymm7[2,3,4,5],ymm6[6],ymm7[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm11[0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm13
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[2,1,0,3]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm13[u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1],xmm14[2],xmm12[3],xmm14[4,5],xmm12[6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0,1,2],ymm1[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm1[0,1,2],ymm9[3,4,5,6,7],ymm1[8,9,10],ymm9[11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm12[0,1,2],ymm2[3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm2[0,1,2],ymm9[3,4,5,6,7],ymm2[8,9,10],ymm9[11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,2,3,14,15,u,u,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2],xmm5[3],xmm8[4,5],xmm5[6],xmm8[7]
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm11[2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm13[u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm10[2],xmm9[3],xmm10[4,5],xmm9[6,7]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1,2],ymm8[3,4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm8[0,1,2],ymm5[3,4,5,6,7],ymm8[8,9,10],ymm5[11,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm9
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
 ; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm9[0,1,2,3,6,5,6,4]
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm8[2,1,0,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm11[u,u,u,u,u,u,0,1,u,u,8,9,12,13,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm11[0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm10[4],xmm8[5,6],xmm10[7]
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7]
@@ -2084,7 +2088,7 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3,4],xmm15[5,6,7]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,2,3,u,u,10,11,14,15,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
 ; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
@@ -2096,37 +2100,38 @@ define void @load_i16_stride6_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3,4,5],ymm7[6],ymm6[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm10
-; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm10[2,2,2,2,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm4[8,9,u,u,0,1,12,13,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm11[0],xmm3[1],xmm11[2,3],xmm3[4],xmm11[5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm4[2,2,2,2,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $236, %ymm11, %ymm7, %ymm3
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm2[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
+; AVX512F-FAST-NEXT:    vpternlogq $236, %ymm11, %ymm7, %ymm10
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13]
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,0,1,4,5,u,u,12,13,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm0[0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm12[0,1,2,3],xmm7[4],xmm12[5],xmm7[6,7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [0,1,2,3,4,9,10,11]
-; AVX512F-FAST-NEXT:    vpermt2d %ymm7, %ymm12, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm10[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[10,11,u,u,2,3,14,15,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm7[1],xmm4[2,3],xmm7[4],xmm4[5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vpternlogq $236, %ymm11, %ymm6, %ymm4
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,2,3,6,7,u,u,14,15,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4],xmm0[5],xmm2[6,7]
-; AVX512F-FAST-NEXT:    vpermt2d %ymm0, %ymm12, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqa %ymm1, (%rsi)
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm10[0,1,2,3,4],ymm7[5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-FAST-NEXT:    vpternlogq $236, %ymm11, %ymm4, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6,7]
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa %ymm2, (%rsi)
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm5, (%rdx)
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm8, (%rcx)
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm9, (%r8)
-; AVX512F-FAST-NEXT:    vmovdqa %ymm3, (%r9)
+; AVX512F-FAST-NEXT:    vmovdqa %ymm7, (%r9)
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-FAST-NEXT:    vmovdqa %ymm4, (%rax)
+; AVX512F-FAST-NEXT:    vmovdqa %ymm0, (%rax)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -3315,10 +3320,10 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm4[0],ymm8[1],ymm4[2,3,4,5],ymm8[6],ymm4[7]
 ; AVX2-SLOW-NEXT:    vpshufb %ymm13, %ymm6, %ymm4
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm9
-; AVX2-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm1
+; AVX2-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm9[2],ymm1[3,4],ymm9[5],ymm1[6,7]
+; AVX2-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm9
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0,1],ymm1[2],ymm9[3,4],ymm1[5],ymm9[6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm1[2,2,2,2,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,2,2]
@@ -3388,37 +3393,40 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm2 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm4 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm4 = ymm9[0,1],mem[2],ymm9[3,4],mem[5],ymm9[6,7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm1
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[0,1,2,1]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,8,9,u,u,0,1,12,13,u,u>
-; AVX2-SLOW-NEXT:    vpshufb %xmm5, %xmm4, %xmm1
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm6[0,1,2,3,6,5,6,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm7[4],xmm1[5,6],xmm7[7]
-; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm7 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm7 = mem[0,1],ymm10[2],mem[3,4],ymm10[5],mem[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm7[2,1,2,3]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm7
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm7[0,0,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,1,3,3]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm11[2,1,2,0,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm12 = xmm13[0],xmm12[1,2],xmm13[3],xmm12[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1],ymm9[2],mem[3,4],ymm9[5],mem[6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm4
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,2,1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[2,1,0,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm6[0,0,0,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm4[0,1,2,3,6,5,6,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm5[4],xmm1[5,6],xmm5[7]
+; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm5 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm5 = mem[0,1],ymm10[2],mem[3,4],ymm10[5],mem[6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm5[2,1,2,3]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm5
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm5[0,3,2,1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm11[0,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm7[2,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm12[0],xmm5[1,2],xmm12[3],xmm5[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = ymm2[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm15[0,1,2],ymm1[3,4,5,6,7],ymm15[8,9,10],ymm1[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,6,5,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3,4],xmm15[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = ymm2[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm13[0,1,2],ymm1[3,4,5,6,7],ymm13[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,6,5,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm13[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm1[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm5, %xmm12, %xmm5
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm15
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm15[0,1,2,3,6,5,6,4]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm10[0,1],ymm9[2],ymm10[3,4],ymm9[5],ymm10[6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm13
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,2,1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm5[2,1,0,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm15[0,0,0,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm13[0,1,2,3,6,5,6,4]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm8[4],xmm5[5,6],xmm8[7]
 ; AVX2-SLOW-NEXT:    vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm8 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm8 = ymm14[0,1],mem[2],ymm14[3],mem[4],ymm14[5,6],mem[7]
@@ -3430,20 +3438,20 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[0,0,2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,3]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm1[2,1,2,0,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm13[0],xmm3[1,2],xmm13[3],xmm3[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm1[2,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm12[0],xmm3[1,2],xmm12[3],xmm3[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = ymm8[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm13[0,1,2],ymm5[3,4,5,6,7],ymm13[8,9,10],ymm5[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,6,5,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm13[5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm8[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm12[0,1,2],ymm5[3,4,5,6,7],ymm12[8,9,10],ymm5[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,6,5,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm12[5,6,7]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm3[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm6[0,1,2,3,7,5,6,5]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,2,0,3]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,7,5,6,5]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm6[1,1,1,1,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm11[3,1,2,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[0,1,3,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm7[3,1,2,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm11[0,1,3,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1,2],xmm4[3],xmm6[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
@@ -3453,8 +3461,8 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,2]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3,4],xmm2[5,6,7]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm15[0,1,2,3,7,5,6,5]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm12[0,2,0,3]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm13[0,1,2,3,7,5,6,5]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm15[1,1,1,1,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
 ; AVX2-SLOW-NEXT:    vpshufb %ymm6, %ymm8, %ymm4
@@ -3467,7 +3475,7 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm4[0,1,3,2]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm3[5,6,7]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0],ymm9[1],ymm10[2,3],ymm9[4],ymm10[5,6],ymm9[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0],ymm10[1],ymm9[2,3],ymm10[4],ymm9[5,6],ymm10[7]
 ; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm3
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm1[0,1,0,2,4,5,6,7]
@@ -3479,8 +3487,8 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm4 = mem[0,1,2,3,4],ymm4[5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm7 = mem[0],ymm7[1],mem[2,3],ymm7[4],mem[5,6],ymm7[7]
+; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm7 = ymm7[0],mem[1],ymm7[2,3],mem[4],ymm7[5,6],mem[7]
 ; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm8
 ; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm8, %xmm6
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,3,2,1]
@@ -4087,236 +4095,240 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW:       # %bb.0:
 ; AVX512F-ONLY-SLOW-NEXT:    pushq %rax
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 224(%rdi), %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 224(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0],ymm13[1],ymm2[2,3],ymm13[4],ymm2[5,6],ymm13[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm19
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm18
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm8[0,2,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm7[0,2,0,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm10
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm11
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm17
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm7, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm1[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm6[3],xmm5[4,5],xmm6[6],xmm5[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm14[0,1],ymm5[2],ymm14[3,4],ymm5[5],ymm14[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm23
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm6, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm2[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm9[3],xmm5[4,5],xmm9[6],xmm5[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm5, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm5, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm3[2,3],mem[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm10[0],ymm11[1],ymm10[2,3],ymm11[4],ymm10[5,6],ymm11[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm24
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm6[0,2,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2],xmm0[3],xmm5[4,5],xmm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm3, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm3[0],ymm12[1],ymm3[2,3,4,5],ymm12[6],ymm3[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm28
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm20
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0],ymm11[1],ymm10[2,3],ymm11[4],ymm10[5,6],ymm11[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm25
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm26
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm5[0,2,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,6,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm9[2],xmm0[3],xmm9[4,5],xmm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm3, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm9[0],ymm12[1],ymm9[2,3,4,5],ymm12[6],ymm9[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm29
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm15
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm9[3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1],ymm3[2],ymm14[3,4],ymm3[5],ymm14[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm23
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm3, %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1],ymm9[2],ymm10[3,4],ymm9[5],ymm10[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm24
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm9, %xmm8
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm0[2,2,2,2,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2],xmm10[3],xmm9[4,5],xmm10[6],xmm9[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm11 = ymm10[2,3],mem[2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 288(%rdi), %ymm10, %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm12[0],ymm11[1],ymm12[2,3,4,5],ymm11[6],ymm12[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm26
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm10[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm11[0,1,2],ymm9[3,4,5,6,7],ymm11[8,9,10],ymm9[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,4,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm30
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm8, %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm4, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm8[2],xmm4[3],xmm8[4,5],xmm4[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm7, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm7[0,1,2],xmm1[3],xmm7[4,5],xmm1[6],xmm7[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm1, %zmm21
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm6, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm15 = ymm1[0,1,2],ymm2[3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm3, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2],xmm10[3],xmm8[4,5],xmm10[6],xmm8[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm11 = ymm8[2,3],mem[2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 288(%rdi), %ymm8, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm12[0],ymm11[1],ymm12[2,3,4,5],ymm11[6],ymm12[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm20
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm28
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm8[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3,4,5,6,7],ymm12[8,9,10],ymm10[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,4,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm30
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm7, %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm7[2],xmm4[3],xmm7[4,5],xmm4[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm7, %xmm6, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm6[0,1,2],xmm2[3],xmm6[4,5],xmm2[6],xmm6[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm2, %zmm21
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm5, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm1[0,1,2],ymm2[3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm7, %xmm9, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm10[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm8[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm13[2],ymm0[3,4],ymm13[5],ymm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm13, %ymm31
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm19
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm31
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm0[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm1[0,3,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm11[0,0,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm10[2,1,2,0,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm29
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[2,1,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,0,0,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm2[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm9[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5,6],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm17
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm14[2],ymm1[3,4],ymm14[5],ymm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm22
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm1[2,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm9[0,0,0,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm2[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[0,3,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[0,3,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[0,0,2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm7[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2],xmm2[3],xmm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm12[0,1],ymm13[2],ymm12[3],ymm13[4],ymm12[5,6],ymm13[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm8[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm2[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm0[0,1],ymm14[2],ymm0[3,4],ymm14[5],ymm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm20
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm4[u,u,u,u,u,u,8,9,u,u,0,1,12,13,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm2[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm14[4],xmm3[5,6],xmm14[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1],ymm3[2],ymm0[3],ymm3[4],ymm0[5,6],ymm3[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm3[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm7[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm12[0,1],ymm15[2],ymm12[3],ymm15[4],ymm12[5,6],ymm15[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm15, %ymm18
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm5[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm29
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[0,0,0,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm3[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm14[4],xmm1[5,6],xmm14[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm15
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm15[0,1],ymm0[2],ymm15[3],ymm0[4],ymm15[5,6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5,6,7],ymm0[8,9,10],ymm14[11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm16
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm17, %zmm0, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm17, %zmm0, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm17, %zmm16
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm10[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm16
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm10[3,1,2,1,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm11[0,1,3,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm10[1,2],xmm1[3],xmm10[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,2,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm9[4],xmm5[5,6],xmm9[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm5, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm18, %zmm0, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm7[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm10[1,2],xmm2[3],xmm10[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[1,1,1,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0,1,2,3],xmm8[4],xmm9[5,6],xmm8[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm8, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm30
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm0, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm6[1,2],xmm5[3],xmm6[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm8[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm6[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,2,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1,2],xmm7[3],xmm6[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm5[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4],xmm4[5,6],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,4,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm0, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm17, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm0, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm20
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm17, %zmm20
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm5, %xmm1, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm5, %xmm1, %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm6[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,3,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm3[0,1,0,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm7, %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm8[4],xmm4[5],xmm8[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm4, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0],ymm12[1],ymm13[2,3,4,5],ymm12[6],ymm13[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm8[0,1],ymm10[2],ymm8[3,4],ymm10[5],ymm8[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm5, %xmm10, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm8[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm11[1],xmm5[2,3],xmm11[4],xmm5[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $236, %ymm22, %ymm11, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,3,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[0,1,0,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm7, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4],xmm2[5],xmm4[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0],ymm12[1],ymm0[2,3,4,5],ymm12[6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm9[2],ymm0[3,4],ymm9[5],ymm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm5, %xmm0, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm9[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm10[1],xmm5[2,3],xmm10[4],xmm5[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $236, %ymm11, %ymm10, %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    movw $31, %ax
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm5, %zmm4 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0],ymm5[1],ymm11[2,3],ymm5[4],ymm11[5,6],ymm5[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm5, %xmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm11[0,3,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm9[0,1,0,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3],xmm13[4],xmm11[5],xmm13[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0],ymm13[1],ymm14[2,3,4,5],ymm13[6],ymm14[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm13[u,u,u,u,u,u,u,u,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm14[0,1,2,3,4],ymm11[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm11
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0],ymm5[1],ymm10[2,3],ymm5[4],ymm10[5,6],ymm5[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm5, %xmm12
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm10[0,3,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm8[0,1,0,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm12[4],xmm10[5],xmm12[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0],ymm15[1],ymm12[2,3,4,5],ymm15[6],ymm12[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm12[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm14[0,1,2,3,4],ymm10[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm10
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,3]
@@ -4329,45 +4341,45 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm7[4],xmm3[5],xmm7[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm3, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm10, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm8[1,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm7[1],xmm3[2,3],xmm7[4],xmm3[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm9[1,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $236, %ymm22, %ymm2, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm3, %zmm1 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm9[0,1,1,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm13[u,u,u,u,u,u,u,u,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $236, %ymm11, %ymm2, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm8[0,1,1,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4],xmm2[5],xmm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm12[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm3 # 64-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    movw $-2048, %ax # imm = 0xF800
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm28, %zmm5 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, (%rsi)
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm21, %zmm3, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm30, %zmm15 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, (%rdx)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm30, %zmm3 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, (%rsi)
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm21, %zmm2, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm19, %zmm13 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, (%rdx)
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm17, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm17, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm17, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm17, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, (%rcx)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, (%r8)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, (%r9)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm20, (%r8)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, (%r9)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    popq %rax
 ; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
 ; AVX512F-ONLY-SLOW-NEXT:    retq
 ;
 ; AVX512F-ONLY-FAST-LABEL: load_i16_stride6_vf32:
 ; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $72, %rsp
+; AVX512F-ONLY-FAST-NEXT:    subq $136, %rsp
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 224(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdi), %ymm2
@@ -4397,24 +4409,25 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0],ymm6[1],ymm12[2,3],ymm6[4],ymm12[5,6],ymm6[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm12, %ymm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm14
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm5[2,1,0,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm11[2,3],mem[2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm11, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm14[0],ymm2[1],ymm14[2,3,4,5],ymm2[6],ymm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm27
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm11, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0],ymm2[1],ymm11[2,3,4,5],ymm2[6],ymm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm11, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm28
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 352(%rdi), %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%rdi), %ymm11
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1],ymm2[2],ymm11[3,4],ymm2[5],ymm11[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm11, %ymm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm11, %ymm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm25
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm8
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm8, %xmm10
@@ -4424,30 +4437,31 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm11 = ymm2[2,3],mem[2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 288(%rdi), %ymm2, %ymm12
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm12[0],ymm11[1],ymm12[2,3,4,5],ymm11[6],ymm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm12, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm11, %ymm26
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm2[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm11[0,1,2],ymm10[3,4,5,6,7],ymm11[8,9,10],ymm10[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,4,6]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm10, %ymm28
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm12, %ymm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm11, %ymm27
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm2[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3,4,5,6,7],ymm12[8,9,10],ymm10[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,4,6]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm10, %ymm29
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm4, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm9, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm9, %xmm9
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm9[2],xmm4[3],xmm9[4,5],xmm4[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm7, %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0,1,2],xmm3[3],xmm7[4,5],xmm3[6],xmm7[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm3, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm6, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm6, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm8, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
@@ -4456,428 +4470,429 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm18
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm30
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm30
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm31
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm0[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm1[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm10, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm11[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1],ymm15[2],ymm13[3,4],ymm15[5],ymm13[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm13, %ymm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm15, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm2[2,1,0,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm8, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm5[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4],xmm2[5,6],xmm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm15[2],ymm1[3,4],ymm15[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm0[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm10[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm1[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm11[u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm15[2],ymm13[3,4],ymm15[5],ymm13[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm13, %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm15, %ymm19
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm14[0,1],ymm12[2],ymm14[3],ymm12[4],ymm14[5,6],ymm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm7[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm1[2,1,0,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm8, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[0,1,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm5[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1],ymm0[2],ymm14[3,4],ymm0[5],ymm14[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm4[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm1[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm7[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm28, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm14[0,1],ymm13[2],ymm14[3],ymm13[4],ymm14[5,6],ymm13[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm6[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm1, %xmm16
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,1,0,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm16[0,1,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm9[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm13[4],xmm1[5,6],xmm13[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm9[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm12[4],xmm1[5,6],xmm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3],ymm1[4],ymm0[5,6],ymm1[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm0[0,1,2],ymm13[3,4,5,6,7],ymm0[8,9,10],ymm13[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm0[0,1,2],ymm12[3,4,5,6,7],ymm0[8,9,10],ymm12[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm12[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm16
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm17, %zmm0, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm10, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1,2],xmm11[3],xmm10[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm8, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm10[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm11[u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm10[1,2],xmm2[3],xmm10[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm8, %xmm8
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,5,6,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2,3],xmm5[4],xmm8[5,6],xmm5[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm5, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm5, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0],xmm2[1,2],xmm4[3],xmm2[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm28, %zmm0, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm22, %zmm0, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm7[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm4[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm9[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm7[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1,2],xmm4[3],xmm5[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm29, %zmm0, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm0, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm6[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm9[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4],xmm3[5,6],xmm5[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm0, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm17, %zmm20
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm29, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm5, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm30, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm31, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm10 = xmm2[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm8, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm10, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0],ymm14[1],ymm12[2,3,4,5],ymm14[6],ymm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm15[0,1],ymm2[2],ymm15[3,4],ymm2[5],ymm15[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm11[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm24, %ymm2, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm8, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm9[0,1,2,3],xmm3[4],xmm9[5],xmm3[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0],ymm14[1],ymm13[2,3,4,5],ymm14[6],ymm13[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm7[0,1],ymm15[2],ymm7[3,4],ymm15[5],ymm7[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm9, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm11[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm12[1],xmm0[2,3],xmm12[4],xmm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm23, %ymm12, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    movw $31, %ax
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm3 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm2, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm7[4],xmm9[5],xmm7[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm13[0],ymm7[1],ymm13[2,3,4,5],ymm7[6],ymm13[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm7[u,u,u,u,u,u,u,u,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [0,1,2,3,4,9,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm9, %ymm23, %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm0, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm11, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm4, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm11[1],xmm4[2,3],xmm11[4],xmm4[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm24, %ymm4, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm6, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm5, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1],xmm5[2,3],xmm4[4],xmm5[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm8, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm10, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm10[0,1,2,3],xmm6[4],xmm10[5],xmm6[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm6, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm4 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm2, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm23, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm2 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm0[0],ymm7[1],ymm0[2,3],ymm7[4],ymm0[5,6],ymm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm12[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm10[0,1,2,3],xmm7[4],xmm10[5],xmm7[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0],ymm10[1],ymm12[2,3,4,5],ymm10[6],ymm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm12[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm10[0,1,2,3,4],ymm7[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm11, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm9, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm11[1],xmm9[2,3],xmm11[4],xmm9[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm23, %ymm9, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm6, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1],xmm4[2,3],xmm6[4],xmm4[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm8, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm8[4],xmm1[5],xmm8[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm2, %zmm1 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4],xmm2[5],xmm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm12[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    movw $-2048, %ax # imm = 0xF800
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm19, %zmm2 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rsi)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm21, %zmm1, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm22, %zmm2 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rdx)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm28, %zmm4 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, (%rsi)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 (%rsp), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm4 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm22, %zmm4 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, (%rdx)
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm17, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm17, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, (%rcx)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, (%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, (%r9)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, (%r9)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $72, %rsp
+; AVX512F-ONLY-FAST-NEXT:    addq $136, %rsp
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
 ; AVX512DQ-SLOW-LABEL: load_i16_stride6_vf32:
 ; AVX512DQ-SLOW:       # %bb.0:
 ; AVX512DQ-SLOW-NEXT:    pushq %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
-; AVX512DQ-SLOW-NEXT:    vmovdqa 224(%rdi), %ymm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm13
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm13[0],ymm12[1],ymm13[2,3],ymm12[4],ymm13[5,6],ymm12[7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm1
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm14
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm14[0,2,0,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
+; AVX512DQ-SLOW-NEXT:    vmovdqa 224(%rdi), %ymm13
+; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0],ymm13[1],ymm2[2,3],ymm13[4],ymm2[5,6],ymm13[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm18
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm10
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm10[0,2,0,3]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm8
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm5
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm4
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm15 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm19
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm7, %xmm10, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm15[2,2,2,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,2]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm5[3],xmm3[4,5],xmm5[6],xmm3[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm3, %zmm16
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm2[2,3],mem[2,3]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0],ymm9[1],ymm8[2,3],ymm9[4],ymm8[5,6],ymm9[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm22
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[0,2,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm2, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0],ymm1[1],ymm2[2,3,4,5],ymm1[6],ymm2[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm27
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm26
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm0[0,1,2],ymm2[3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm2
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm6
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm15 = ymm6[0,1],ymm2[2],ymm6[3,4],ymm2[5],ymm6[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm19
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm21
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm7, %xmm2, %xmm7
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm1[2,2,2,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,2,2]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm11[3],xmm7[4,5],xmm11[6],xmm7[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm7
-; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm7[2,3],mem[2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 288(%rdi), %ymm7, %ymm11
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0],ymm0[1],ymm11[2,3,4,5],ymm0[6],ymm11[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm24
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm6, %xmm7, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm15[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,1,2,2]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[3],xmm2[4,5],xmm8[6],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm16
+; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm3[2,3],mem[2,3]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0],ymm9[1],ymm5[2,3],ymm9[4],ymm5[5,6],ymm9[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm22
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm24
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm5[0,2,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,6,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm9[2],xmm1[3],xmm9[4,5],xmm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm3, %ymm8
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm8[0],ymm12[1],ymm8[2,3,4,5],ymm12[6],ymm8[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm29
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm1[0,1,2],ymm9[3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm23
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm25
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm7[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm0[0,1,2],ymm9[3,4,5,6,7],ymm0[8,9,10],ymm9[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm6, %xmm1, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm14[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,2,2]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm11[3],xmm6[4,5],xmm11[6],xmm6[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm6
+; AVX512DQ-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm6[2,3],mem[2,3]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, 288(%rdi), %ymm6, %ymm11
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm11[0],ymm0[1],ymm11[2,3,4,5],ymm0[6],ymm11[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm26
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm27
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm6[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm0[0,1,2],ymm8[3,4,5,6,7],ymm0[8,9,10],ymm8[11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm16, %zmm17, %zmm8
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm16, %zmm17, %zmm9
 ; AVX512DQ-SLOW-NEXT:    movw $-2048, %ax # imm = 0xF800
 ; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm8 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm9 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm14, %xmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm6, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm9[2],xmm6[3],xmm9[4,5],xmm6[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm10, %xmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm4, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm8[2],xmm4[3],xmm8[4,5],xmm4[6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm8, %xmm10, %xmm9
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm15[0,1,2,3,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2],xmm10[3],xmm9[4,5],xmm10[6],xmm9[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm9, %zmm6
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm8, %xmm7, %xmm7
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm15[3],xmm7[4,5],xmm15[6],xmm7[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm7, %zmm4
 ; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm4, %xmm4
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1],xmm0[2],xmm4[3],xmm0[4,5],xmm4[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2],ymm3[3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm8, %xmm2, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm8, %xmm1, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm14[0,1,2,3,5,5,5,5]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm6, %zmm17, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm4, %zmm17, %zmm2
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm7[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm6[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm3 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm29
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm2 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm20
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm13[2],ymm0[3,4],ymm13[5],ymm0[6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm13, %ymm30
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm0[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm0[0,3,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm15[0,0,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm7[2,1,2,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm0[0,3,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm10[0,0,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm15[2,1,2,0,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm28
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm13
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm13[0,1],ymm1[2],ymm13[3,4],ymm1[5],ymm13[6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm14[2,1,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,0,0,0,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,6,5,6,4]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4],xmm2[5,6],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm13
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm1[2],ymm13[3,4],ymm1[5],ymm13[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm1[2,1,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm14[0,0,0,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm7[0,1,2,3,6,5,6,4]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm16
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,3,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm2[0,0,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[0,3,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[0,0,2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[2,1,2,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[2,1,2,0,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm12
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm9
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1],ymm12[2],ymm9[3],ymm12[4],ymm9[5,6],ymm12[7]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm5[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm11
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm11[0,1],ymm12[2],ymm11[3],ymm12[4],ymm11[5,6],ymm12[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm3[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm6[u,u,u,u,u,u,8,9,u,u,0,1,12,13,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm0[0,1,2,3,6,5,6,4]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm11[4],xmm1[5,6],xmm11[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm8
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm8[2],ymm1[3],ymm8[4],ymm1[5,6],ymm8[7]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm1[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm8[0,1,2],ymm11[3,4,5,6,7],ymm8[8,9,10],ymm11[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,6,5,4]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm16, %zmm8, %zmm10
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[2,1,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm5[0,0,0,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm1[0,1,2,3,6,5,6,4]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm9[4],xmm0[5,6],xmm9[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1],ymm2[2],ymm0[3],ymm2[4],ymm0[5,6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm0[0,1,2],ymm9[3,4,5,6,7],ymm0[8,9,10],ymm9[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm18
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm16, %zmm0, %zmm8
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm10, %zmm17, %zmm18
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm15[0,1,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0],xmm10[1,2],xmm7[3],xmm10[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm14[0,2,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm10[0,1,2,3],xmm4[4],xmm10[5,6],xmm4[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm7, %zmm4, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1,2],xmm3[3],xmm2[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm5[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm6[0,2,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4],xmm3[5,6],xmm0[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm4, %zmm8, %zmm2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm16
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm16
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm8, %zmm17, %zmm18
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm15[3,1,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm10[0,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm9[1,2],xmm8[3],xmm9[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,5,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm14[1,1,1,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm9[0,1,2,3],xmm7[4],xmm9[5,6],xmm7[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm8, %zmm7, %zmm7
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1,2],xmm4[3],xmm6[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm5[1,1,1,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm1[4],xmm4[5,6],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,4,5]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm7, %zmm0, %zmm3
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm16
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm16
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm4, %xmm0, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm7, %xmm0, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm5[2,2,2,2,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm13[1],ymm2[2,3],ymm13[4],ymm2[5,6],ymm13[7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm6
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm2[0,1,0,2,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm7, %xmm6, %xmm8
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm8[4],xmm3[5],xmm8[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm8, %xmm6, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5],xmm4[6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm3, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0],ymm9[1],ymm12[2,3,4,5],ymm9[6],ymm12[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm10
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm10[2],ymm8[3,4],ymm10[5],ymm8[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm4, %xmm8, %xmm4
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm13
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm13[2,2,2,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm10[1],xmm4[2,3],xmm10[4],xmm4[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $236, %ymm14, %ymm10, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0],ymm11[1],ymm12[2,3,4,5],ymm11[6],ymm12[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm9
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm9[2],ymm4[3,4],ymm9[5],ymm4[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm7, %xmm4, %xmm7
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm13
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm13[2,2,2,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0],xmm9[1],xmm7[2,3],xmm9[4],xmm7[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $236, %ymm10, %ymm9, %ymm7
 ; AVX512DQ-SLOW-NEXT:    movw $31, %ax
 ; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm4, %zmm0, %zmm3 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm10
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm4[0],ymm10[1],ymm4[2,3],ymm10[4],ymm4[5,6],ymm10[7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm4
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm7, %xmm4, %xmm11
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm10[0,3,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm7[0,1,0,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm11[4],xmm10[5],xmm11[6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm12
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm12[0],ymm11[1],ymm12[2,3,4,5],ymm11[6],ymm12[7]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = ymm11[u,u,u,u,u,u,u,u,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm15[0,1,2,3,4],ymm10[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm15, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm7, %zmm0, %zmm3 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm9
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0],ymm9[1],ymm7[2,3],ymm9[4],ymm7[5,6],ymm9[7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm9
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm8, %xmm9, %xmm8
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,3,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm7[0,1,0,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm14[0,1,2,3],xmm8[4],xmm14[5],xmm8[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm11
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm14
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm14[0],ymm11[1],ymm14[2,3,4,5],ymm11[6],ymm14[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm11[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3,4],ymm8[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm5[1],xmm0[2,3],xmm5[4],xmm0[5,6,7]
@@ -4888,19 +4903,19 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4],xmm2[5],xmm6[6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm15, %xmm8, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm13[1,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1],xmm2[2,3],xmm6[4],xmm2[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm14, %xmm4, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm13[1,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3],xmm4[4],xmm2[5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpternlogq $236, %ymm14, %ymm1, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $236, %ymm10, %ymm1, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $0, %ymm2, %zmm0, %zmm0 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm5, %xmm4, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm5, %xmm9, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm7[0,1,1,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4],xmm2[5],xmm1[6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm11[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
@@ -4908,11 +4923,11 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vmovaps %zmm2, (%rdx)
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm10
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm8
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm17, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, (%rcx)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm16, (%r8)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, (%r9)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, (%r9)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512DQ-SLOW-NEXT:    popq %rax
 ; AVX512DQ-SLOW-NEXT:    vzeroupper
@@ -4920,69 +4935,68 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512DQ-FAST-LABEL: load_i16_stride6_vf32:
 ; AVX512DQ-FAST:       # %bb.0:
+; AVX512DQ-FAST-NEXT:    pushq %rax
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
 ; AVX512DQ-FAST-NEXT:    vmovdqa 224(%rdi), %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rdi), %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm20
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm6, %xmm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm20
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm21
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm5, %xmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,0,1,u,u,8,9,12,13,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm4[2,1,0,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm12, %xmm4
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2],xmm1[3],xmm4[4,5],xmm1[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa 160(%rdi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm11
-; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm4
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm22
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm23
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm15, %xmm4
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm8, %xmm5
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3],xmm5[4,5],xmm4[6],xmm5[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm5
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm3[2,1,0,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm14, %xmm3
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 160(%rdi), %ymm12
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm7
+; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm13
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm15, %xmm4
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm10, %xmm6
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[3],xmm6[4,5],xmm4[6],xmm6[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %ymm9
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm4, %zmm16
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm11[0],ymm5[1],ymm11[2,3],ymm5[4],ymm11[5,6],ymm5[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm19
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm11, %ymm21
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm7[0],ymm6[1],ymm7[2,3],ymm6[4],ymm7[5,6],ymm6[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm18
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm19
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[2,1,0,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[2,1,0,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm7, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6,7]
 ; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm9[2,3],mem[2,3]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm9, %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0],ymm1[1],ymm2[2,3,4,5],ymm1[6],ymm2[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm28
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm30
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm2[0],ymm1[1],ymm2[2,3,4,5],ymm1[6],ymm2[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm26
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm27
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm0[0,1,2],ymm2[3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 352(%rdi), %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa 320(%rdi), %ymm2
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm24
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm22
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm1, %xmm2
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm8
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm8[0,1,2],xmm2[3],xmm8[4,5],xmm2[6],xmm8[7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm8
+; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm2
+; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm2[2,3],mem[2,3]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, 288(%rdi), %ymm2, %ymm11
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0],ymm0[1],ymm11[2,3,4,5],ymm0[6],ymm11[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm11, %ymm24
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm25
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm7
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm2, %xmm10
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm10[0,1,2],xmm7[3],xmm10[4,5],xmm7[6],xmm10[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm11
-; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm11[2,3],mem[2,3]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, 288(%rdi), %ymm11, %ymm10
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm10[0],ymm0[1],ymm10[2,3,4,5],ymm0[6],ymm10[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm10, %ymm26
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm27
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm14[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm0[0,1,2],ymm7[3,4,5,6,7],ymm0[8,9,10],ymm7[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm0[0,1,2],ymm8[3,4,5,6,7],ymm0[8,9,10],ymm8[11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm16, %zmm17, %zmm9
 ; AVX512DQ-FAST-NEXT:    movw $-2048, %ax # imm = 0xF800
@@ -4990,201 +5004,203 @@ define void @load_i16_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm9 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm6, %xmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm12, %xmm12
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm12[2],xmm6[3],xmm12[4,5],xmm6[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm8, %xmm8
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm5, %xmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm14, %xmm14
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm14[2],xmm5[3],xmm14[4,5],xmm5[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm10, %xmm10
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,5,5,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2],xmm15[3],xmm8[4,5],xmm15[6],xmm8[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm8, %zmm6
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2],xmm15[3],xmm10[4,5],xmm15[6],xmm10[7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm10, %zmm5
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm0[0,1,2],ymm3[3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2],ymm3[3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm17, %zmm13
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm17, %zmm3
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm14[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm13 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm29
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm15 = xmm1[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm2[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm8, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm15[2,1,2,0,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[2,1,0,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm7, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm4[0,1,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,6,5,6,4]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4],xmm5[5,6],xmm4[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm4, %zmm16
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm3 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm5[2,1,2,0,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm28
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm15 = xmm0[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm15[2,1,2,0,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm0[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm8[u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm28, %ymm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm9
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm11[0,1],ymm9[2],ymm11[3],ymm9[4],ymm11[5,6],ymm9[7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm14[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0,1],ymm13[2],ymm12[3,4],ymm13[5],ymm12[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm12, %ymm29
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm13, %ymm30
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,1,0,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm5, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,6,5,6,4]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm16
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm20
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm13
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2],ymm13[3,4],ymm0[5],ymm13[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm4[2,1,2,0,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm3[u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm7[1,2],xmm0[3],xmm7[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm12
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm9
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm12[0,1],ymm9[2],ymm12[3],ymm9[4],ymm12[5,6],ymm9[7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm14[4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm7[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm7[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti32x4 $1, %ymm0, %xmm17
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,1,0,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm2, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[2,1,0,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm7, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm17[0,1,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm6[0,1,2,3,6,5,6,4]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm10[4],xmm0[5,6],xmm10[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm6[0,1,2,3,6,5,6,4]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm11[4],xmm0[5,6],xmm11[7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2],ymm0[3],ymm1[4],ymm0[5,6],ymm1[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm0[0,1,2],ymm10[3,4,5,6,7],ymm0[8,9,10],ymm10[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm0[0,1,2],ymm11[3,4,5,6,7],ymm0[8,9,10],ymm11[11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm18
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm16, %zmm0, %zmm12
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm16, %zmm0, %zmm10
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm12, %zmm17, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm8, %xmm8
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm15[3,1,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm12[0],xmm8[1,2],xmm12[3],xmm8[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm7, %xmm7
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0,1,2,3],xmm3[4],xmm7[5,6],xmm3[7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm8, %zmm3, %zmm3
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm4, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[3,1,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1,2],xmm5[3],xmm4[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm14[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm6[0,1,2,3,7,5,6,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4],xmm2[5,6],xmm5[7]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm10, %zmm17, %zmm18
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm15[3,1,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0],xmm8[1,2],xmm10[3],xmm8[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4],xmm5[5,6],xmm2[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm8, %zmm2, %zmm2
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,2,3,6,7,u,u,14,15,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2],xmm4[3],xmm3[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm14[6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm4[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm6[0,1,2,3,7,5,6,5]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5,6],xmm5[7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm1[0,1,2],ymm4[3,4,5,6,7],ymm1[8,9,10],ymm4[11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm0, %zmm4
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm0, %zmm3
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm16
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm17, %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm16
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm28, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm0
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm15
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm15[2,2,2,2,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm0
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm15
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm15[2,2,2,2,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm6
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm6, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm5
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4],xmm5[5],xmm2[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm6, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm0, %xmm8
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm8[0,1,2,3],xmm2[4],xmm8[5],xmm2[6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0],ymm11[1],ymm9[2,3,4,5],ymm11[6],ymm9[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm9
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1],ymm5[2],ymm9[3,4],ymm5[5],ymm9[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm12
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm12[2,2,2,2,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm10[1],xmm1[2,3],xmm10[4],xmm1[5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0],ymm12[1],ymm9[2,3,4,5],ymm12[6],ymm9[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm8
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm13[2],ymm8[3,4],ymm13[5],ymm8[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm8, %xmm4
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm10
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm10[2,2,2,2,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm11[1],xmm4[2,3],xmm11[4],xmm4[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm19, %ymm10, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[4,5,0,1,12,13,24,25,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm19, %ymm13, %ymm4
 ; AVX512DQ-FAST-NEXT:    movw $31, %ax
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm1, %zmm0, %zmm2 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm10
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm1[0],ymm10[1],ymm1[2,3],ymm10[4],ymm1[5,6],ymm10[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm14
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm10[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm8
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm14[4],xmm8[5],xmm14[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm11
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm11[0],ymm10[1],ymm11[2,3,4,5],ymm10[6],ymm11[7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm10[u,u,u,u,u,u,u,u,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [0,1,2,3,4,9,10,11]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm8, %ymm20, %ymm11
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm12, %xmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm12[1],xmm5[2,3],xmm12[4],xmm5[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm19, %ymm5, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm15, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm4
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm6, %xmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm7, %xmm7
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4],xmm7[5],xmm6[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm6, %zmm4
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm0, %zmm0, %zmm4 {%k1}
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm1, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm1
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4],xmm1[5],xmm0[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm10[u,u,u,u,u,u,u,u,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm20, %ymm1
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm1, (%rsi)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, (%rdx)
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm4, %zmm0, %zmm2 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm11
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm4[0],ymm11[1],ymm4[2,3],ymm11[4],ymm4[5,6],ymm11[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm14
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm13[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm7
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm14[4],xmm7[5],xmm14[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm12
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm12[0],ymm11[1],ymm12[2,3,4,5],ymm11[6],ymm12[7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm13[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm14[0,1,2,3,4],ymm7[5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm10, %xmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm8, %xmm8
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm10[1],xmm8[2,3],xmm10[4],xmm8[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[6,7,2,3,14,15,26,27,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm19, %ymm8, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm15, %xmm8
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm8[1],xmm3[2,3],xmm8[4],xmm3[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm6, %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4],xmm0[5],xmm6[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm3, %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm1, %zmm0, %zmm0 {%k1}
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm3
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5],xmm1[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm13[10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4],ymm1[5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm3, (%rsi)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm3, (%rdx)
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm8
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm17, %zmm0
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm7
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm17, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, (%rcx)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, (%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, (%r9)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
+; AVX512DQ-FAST-NEXT:    popq %rax
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;
@@ -7455,7 +7471,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX2-SLOW-LABEL: load_i16_stride6_vf64:
 ; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    subq $1256, %rsp # imm = 0x4E8
+; AVX2-SLOW-NEXT:    subq $1272, %rsp # imm = 0x4F8
 ; AVX2-SLOW-NEXT:    vmovdqa (%rdi), %ymm8
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm9
@@ -7571,33 +7587,34 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpblendvb %ymm9, %ymm0, %ymm1, %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm7 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm0
+; AVX2-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm6 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm0
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
-; AVX2-SLOW-NEXT:    vpshufb %xmm10, %xmm7, %xmm1
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
+; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm6, %xmm1
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21>
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,8,9,4,5,16,17,28,29,24,25,28,29,24,25,28,29,24,25,20,21>
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm1 = ymm1[0],mem[1],ymm1[2,3,4,5],mem[6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %ymm12, %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vmovdqa %ymm2, %ymm5
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqa 544(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 512(%rdi), %ymm2
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm6[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[2,2,2,2,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,2]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm2
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm2
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %xmm11, %xmm2, %xmm4
+; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm2, %xmm4
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3],xmm4[4,5],xmm1[6],xmm4[7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
@@ -7610,448 +7627,457 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm10, %xmm4, %xmm1
+; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm4, %xmm1
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
 ; AVX2-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm1 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm1 = ymm14[0],mem[1],ymm14[2,3,4,5],mem[6],ymm14[7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %ymm12, %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vmovdqa %ymm5, %ymm10
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm1
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm2
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm2[2,2,2,2,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm5[0,1,2,2]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm1
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm1
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %xmm11, %xmm1, %xmm15
+; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm1, %xmm15
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0,1,2],xmm14[3],xmm15[4,5],xmm14[6],xmm15[7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5,6,7],ymm0[8,9,10],ymm14[11,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm2 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm0
+; AVX2-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm0
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm0[0,2,0,3]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,6,6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm10, %xmm2, %xmm15
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0,1],xmm14[2],xmm15[3],xmm14[4,5],xmm15[6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm1, %xmm13
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1],xmm14[2],xmm13[3],xmm14[4,5],xmm13[6,7]
 ; AVX2-SLOW-NEXT:    vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm0 = mem[0],ymm8[1],mem[2,3,4,5],ymm8[6],mem[7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %ymm12, %ymm0, %ymm15
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm10, %ymm0, %ymm12
+; AVX2-SLOW-NEXT:    vmovdqa %ymm10, %ymm14
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqa 736(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa 704(%rdi), %ymm1
-; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm1[2,2,2,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,1,2,2]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm9
-; AVX2-SLOW-NEXT:    vpshufb %xmm11, %xmm9, %xmm13
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1,2],xmm15[3],xmm13[4,5],xmm15[6],xmm13[7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm13 = ymm14[0,1,2],ymm13[3,4,5,6,7],ymm14[8,9,10],ymm13[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1,2,3],ymm13[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa 704(%rdi), %ymm5
+; AVX2-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm5[0,1],ymm0[2],ymm5[3,4],ymm0[5],ymm5[6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,2,2]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm8
+; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm8, %xmm10
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2],xmm11[3],xmm10[4,5],xmm11[6],xmm10[7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0,1,2],ymm10[3,4,5,6,7],ymm12[8,9,10],ymm10[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm10[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm5
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm5[0,1],ymm0[2],ymm5[3,4],ymm0[5],ymm5[6,7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm8
-; AVX2-SLOW-NEXT:    vpshufb %xmm11, %xmm8, %xmm11
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm13[2,2,2,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,1,2,2]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2],xmm15[3],xmm11[4,5],xmm15[6],xmm11[7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm5[0,1],ymm0[2],ymm5[3,4],ymm0[5],ymm5[6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm11
+; AVX2-SLOW-NEXT:    vpshufb %xmm7, %xmm11, %xmm7
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm10[2,2,2,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,1,2,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2],xmm12[3],xmm7[4,5],xmm12[6],xmm7[7]
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm15 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm10, %xmm15, %xmm0
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm5
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm5[0,2,0,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,6,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm14[2],xmm0[3],xmm14[4,5],xmm0[6,7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm14 = mem[0],ymm10[1],mem[2,3,4,5],ymm10[6],mem[7]
-; AVX2-SLOW-NEXT:    vpshufb %ymm12, %ymm14, %ymm12
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm12[3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm0[0,1,2],ymm11[3,4,5,6,7],ymm0[8,9,10],ymm11[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm12 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm12, %xmm0
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm5
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm5[0,2,0,3]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm9[2],xmm0[3],xmm9[4,5],xmm0[6,7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $189, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm9 = mem[0],ymm9[1],mem[2,3,4,5],ymm9[6],mem[7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm14, %ymm9, %ymm15
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm0[0,1,2],ymm7[3,4,5,6,7],ymm0[8,9,10],ymm7[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm7[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm0[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX2-SLOW-NEXT:    vpshufb %xmm12, %xmm7, %xmm7
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm11[2],xmm7[3],xmm11[4,5],xmm7[6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23>
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm0[u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm6, %xmm6
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm7[2],xmm6[3],xmm7[4,5],xmm6[6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,u,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23>
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %ymm11, %ymm0, %ymm10
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2],ymm10[3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
+; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm0, %ymm14
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2],ymm14[3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <6,7,2,3,4,5,u,u,2,3,14,15,u,u,6,7>
 ; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm6[3],xmm0[4,5],xmm6[6],xmm0[7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[3],xmm0[4,5],xmm3[6],xmm0[7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm7[0,1,2],ymm0[3,4,5,6,7],ymm7[8,9,10],ymm0[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3,4,5,6,7],ymm6[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,10,11,u,u,2,3,14,15,u,u,u,u>
 ; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX2-SLOW-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1],xmm0[2],xmm4[3],xmm0[4,5],xmm4[6,7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %ymm11, %ymm4, %ymm4
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %xmm10, %xmm4, %xmm4
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm0[0,1,2],ymm3[3,4,5,6,7],ymm0[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm4, %xmm3
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1],xmm0[2],xmm3[3],xmm0[4,5],xmm3[6,7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm3, %ymm3
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX2-SLOW-NEXT:    vpshufb %xmm14, %xmm3, %xmm3
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,5,5]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3,4,5,6,7],ymm0[8,9,10],ymm2[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX2-SLOW-NEXT:    vpshufb %xmm12, %xmm2, %xmm2
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3],xmm0[4,5],xmm2[6,7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %ymm11, %ymm2, %ymm2
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm10, %xmm9, %xmm2
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm1, %xmm1
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6,7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm1, %ymm1
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm14, %xmm8, %xmm1
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm13[0,1,2,3,5,5,5,5]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %xmm10, %xmm8, %xmm0
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm13[0,1,2,3,5,5,5,5]
+; AVX2-SLOW-NEXT:    vpshufb %xmm14, %xmm11, %xmm0
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm10[0,1,2,3,5,5,5,5]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
 ; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm1
-; AVX2-SLOW-NEXT:    vpshufb %xmm12, %xmm15, %xmm2
+; AVX2-SLOW-NEXT:    vpshufb %xmm15, %xmm12, %xmm2
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6,7]
-; AVX2-SLOW-NEXT:    vpshufb %ymm11, %ymm14, %ymm2
+; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm9, %ymm2
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm4 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm3 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm1
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[0,1,2,1]
-; AVX2-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,8,9,u,u,0,1,12,13,u,u>
-; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm3, %xmm1
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,6,5,6,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
+; AVX2-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm2 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm2 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
+; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm7[0,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5,6],xmm1[7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm1[2,1,2,3]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[0,3,2,1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm4[0,0,2,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,3,3]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm0[2,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[2,1,2,0,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1,2],xmm6[3],xmm5[4,5,6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
-; AVX2-SLOW-NEXT:    vpshufb %ymm0, %ymm4, %ymm6
-; AVX2-SLOW-NEXT:    vmovdqa %ymm0, %ymm8
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3,4,5,6,7],ymm6[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
+; AVX2-SLOW-NEXT:    vpshufb %ymm1, %ymm2, %ymm6
+; AVX2-SLOW-NEXT:    vmovdqa %ymm1, %ymm8
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3,4,5,6,7],ymm6[8,9,10],ymm0[11,12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,5,4]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm6[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm1 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm2 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm5[0,1,2,1]
-; AVX2-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm2, %xmm5
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm0[0,1,2,3,6,5,6,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm5[0,1,2,3],xmm10[4],xmm5[5,6],xmm10[7]
+; AVX2-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm2 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm12 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm12[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm5[0,1,2,1]
+; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3]
 ; AVX2-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm12
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm12[0,3,2,1]
-; AVX2-SLOW-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm4[0,0,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,3,3]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm0[2,1,2,0,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm14[0],xmm13[1,2],xmm14[3],xmm13[4,5,6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX2-SLOW-NEXT:    vmovdqa %ymm8, %ymm4
-; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm1, %ymm14
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm14[0,1,2],ymm10[3,4,5,6,7],ymm14[8,9,10],ymm10[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,6,5,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3,4],xmm14[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm10[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm1[0,1,2,3,6,5,6,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm5[4],xmm0[5,6],xmm5[7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm10 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm10 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm10[2,1,2,3]
+; AVX2-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm10
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm10[0,3,2,1]
+; AVX2-SLOW-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm3[0,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm1[2,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm12[0],xmm10[1,2],xmm12[3],xmm10[4,5,6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm2, %ymm12
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm12[0,1,2],ymm0[3,4,5,6,7],ymm12[8,9,10],ymm0[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,6,5,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm12[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm10 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
+; AVX2-SLOW-NEXT:    vpblendd $148, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm3 = ymm0[0,1],mem[2],ymm0[3],mem[4],ymm0[5,6],mem[7]
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm13 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm13 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm14
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm13, %xmm15
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm14[0,1,2,3,6,5,6,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm15[0,1,2,3],xmm8[4],xmm15[5,6],xmm8[7]
+; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm10 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm12
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,1,2,1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,1,0,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm10[0,0,0,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm12[0,1,2,3,6,5,6,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3],xmm14[4],xmm13[5,6],xmm14[7]
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm15 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm15[2,1,2,3]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm15
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm15[0,0,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,3,3]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm1[2,1,2,0,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm6[1,2],xmm5[3],xmm6[4,5,6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm6
-; AVX2-SLOW-NEXT:    vpshufb %ymm4, %ymm10, %ymm8
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm8[0,1,2],ymm6[3,4,5,6,7],ymm8[8,9,10],ymm6[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,6,5,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm8[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm14 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm14[2,1,2,3]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm14
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,3,2,1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm14[0,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm1[2,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm11[1,2],xmm6[3],xmm11[4,5,6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm11
+; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm3, %ymm13
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm13[0,1,2],ymm11[3,4,5,6,7],ymm13[8,9,10],ymm11[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,6,5,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm13[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm11[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm6 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm6 = ymm15[0,1],mem[2],ymm15[3,4],mem[5],ymm15[6,7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm11
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm11[0,1,2,1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,1,0,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm6[0,0,0,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,4,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm13[0,1,2,3,6,5,6,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4],xmm11[5,6],xmm9[7]
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm5 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm9, %xmm5, %xmm6
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm8
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,1,2,1]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm8[0,1,2,3,6,5,6,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm9[4],xmm6[5,6],xmm9[7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm9 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm11 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
 ; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX2-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,1,2,3]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,1,2,3]
 ; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm0[0,0,2,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,1,3,3]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm4[2,1,2,0,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1,2],xmm11[3],xmm12[4,5,6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm9[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm12[0,1,2],ymm6[3,4,5,6,7],ymm12[8,9,10],ymm6[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,6,5,4]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3,4],xmm12[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm11[0,1,2,3],ymm6[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm6 = mem[0,1,2,3,7,5,6,5]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,2,0,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4],xmm3[5,6],xmm6[7]
-; AVX2-SLOW-NEXT:    vpshuflw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm6 = mem[3,1,2,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,3,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm7[1,2],xmm6[3],xmm7[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm11, %ymm11
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm11[0,1,2],ymm3[3,4,5,6,7],ymm11[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,3,2]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm11[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm14[0,1,2,3,7,5,6,5]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm13[0,2,0,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0,1,2,3],xmm3[4],xmm6[5,6],xmm3[7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm0[0,0,2,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm2[2,1,2,0,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0],xmm5[1,2],xmm8[3],xmm5[4,5,6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm8
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm11[4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0,1,2],ymm8[3,4,5,6,7],ymm9[8,9,10],ymm8[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,6,5,4]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm9[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm8[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; AVX2-SLOW-NEXT:    # xmm5 = mem[0,1,2,3,7,5,6,5]
+; AVX2-SLOW-NEXT:    vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; AVX2-SLOW-NEXT:    # xmm8 = mem[1,1,1,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2,3],xmm5[4],xmm8[5,6],xmm5[7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,3,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1,2],xmm7[3],xmm4[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm7, %ymm7
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm7[0,1,2],ymm5[3,4,5,6,7],ymm7[8,9,10],ymm5[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,3,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm7[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm12[0,1,2,3,7,5,6,5]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm10[1,1,1,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1,2,3],xmm5[4],xmm7[5,6],xmm5[7]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm15[0,1,3,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1,2],xmm1[3],xmm6[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm10, %ymm6
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm6[0,1,2],ymm3[3,4,5,6,7],ymm6[8,9,10],ymm3[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,3,2]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm6[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm1[0,1,2,3],ymm3[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm14[0,1,3,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm7[1,2],xmm1[3],xmm7[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm3, %ymm3
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm3[0,1,2],ymm5[3,4,5,6,7],ymm3[8,9,10],ymm5[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm3[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm1[0,1,2,3],ymm5[4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # xmm1 = mem[0,1,2,3,7,5,6,5]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,2,0,3]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4],xmm2[5,6],xmm1[7]
-; AVX2-SLOW-NEXT:    vpshuflw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm2 = mem[3,1,2,1,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw $244, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; AVX2-SLOW-NEXT:    # xmm3 = mem[0,1,3,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3],xmm3[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm3, %ymm3
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7],ymm3[8,9,10],ymm1[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,2]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,7,5,6,5]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[0,2,0,3]
+; AVX2-SLOW-NEXT:    vpshuflw $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; AVX2-SLOW-NEXT:    # xmm3 = mem[1,1,1,1,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4],xmm3[5,6],xmm1[7]
-; AVX2-SLOW-NEXT:    vpshufb %ymm7, %ymm9, %ymm3
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[3,1,2,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw $103, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; AVX2-SLOW-NEXT:    # xmm3 = mem[3,1,2,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw $244, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; AVX2-SLOW-NEXT:    # xmm5 = mem[0,1,3,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,7,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm5[1,2],xmm3[3],xmm5[4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm4, %ymm5
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm5[0,1,2],ymm1[3,4,5,6,7],ymm5[8,9,10],ymm1[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,3,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm5[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm13[0,1,2,3,7,5,6,5]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm6[1,1,1,1,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,7,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4],xmm5[5,6],xmm3[7]
+; AVX2-SLOW-NEXT:    vpshufb %ymm8, %ymm11, %ymm5
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,1,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,7,7,7]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0],xmm0[1,2],xmm4[3],xmm0[4,5,6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7],ymm3[8,9,10],ymm1[11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,2]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2],xmm2[3],xmm0[4,5,6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm2
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3,4,5,6,7],ymm5[8,9,10],ymm2[11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[0,1,3,2]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm3[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm0 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm0 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm0 = mem[0],ymm15[1],mem[2,3],ymm15[4],mem[5,6],ymm15[7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,2,1]
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[0,1,0,2,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,6,6]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX2-SLOW-NEXT:    vpshufb %xmm5, %xmm4, %xmm7
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm7[4],xmm3[5],xmm7[6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm8
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm8[4],xmm3[5],xmm8[6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm3 = mem[0,1,2,3,4],ymm3[5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm7 = ymm7[0],mem[1],ymm7[2,3],mem[4],ymm7[5,6],mem[7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm8
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm7[0,1,0,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,6,6,6,6]
-; AVX2-SLOW-NEXT:    vpshufb %xmm5, %xmm8, %xmm10
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm10[4],xmm9[5],xmm10[6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm9 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm9 = mem[0,1,2,3,4],ymm9[5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm10 = ymm10[0],mem[1],ymm10[2,3],mem[4],ymm10[5,6],mem[7]
-; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm12
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm10[0,1,0,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm8 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm8 = ymm4[0],mem[1],ymm4[2,3],mem[4],ymm4[5,6],mem[7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,3,2,1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm8[0,1,0,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,6,6,6]
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm9, %xmm11
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm11[4],xmm10[5],xmm11[6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm10 = mem[0,1,2,3,4],ymm10[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm11 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm11 = ymm4[0],mem[1],ymm4[2,3],mem[4],ymm4[5,6],mem[7]
+; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm12
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,3,2,1]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm11[0,1,0,2,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,6,6,6,6]
-; AVX2-SLOW-NEXT:    vpshufb %xmm5, %xmm12, %xmm14
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm12, %xmm14
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3],xmm14[4],xmm13[5],xmm14[6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
 ; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm13 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm13 = mem[0,1,2,3,4],ymm13[5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm14 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm14 = ymm11[0],mem[1],ymm11[2,3],mem[4],ymm11[5,6],mem[7]
+; AVX2-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm14 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm14 = ymm4[0],mem[1],ymm4[2,3],mem[4],ymm4[5,6],mem[7]
 ; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm15
-; AVX2-SLOW-NEXT:    vpshufb %xmm5, %xmm15, %xmm5
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm15, %xmm6
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,3,2,1]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm14[0,1,0,2,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,6,6,6,6]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm11[0,1,2,3],xmm5[4],xmm11[5],xmm5[6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm5 = mem[0,1,2,3,4],ymm5[5,6,7]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX2-SLOW-NEXT:    vpshufb %xmm11, %xmm4, %xmm4
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm14[0,1,0,2,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,6,6,6]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm6[4],xmm4[5],xmm6[6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm4 = mem[0,1,2,3,4],ymm4[5,6,7]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
 ; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4],xmm0[5],xmm4[6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm5[4],xmm0[5],xmm5[6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm0 = mem[0,1,2,3,4],ymm0[5,6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm11, %xmm8, %xmm4
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,1,1,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,3,3]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0,1,2,3],xmm4[4],xmm7[5],xmm4[6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm4 = mem[0,1,2,3,4],ymm4[5,6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm11, %xmm12, %xmm7
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm10[0,1,1,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm9, %xmm5
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,1,1,3,4,5,6,7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,1,3,3]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0,1,2,3],xmm7[4],xmm8[5],xmm7[6,7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
-; AVX2-SLOW-NEXT:    # ymm7 = mem[0,1,2,3,4],ymm7[5,6,7]
-; AVX2-SLOW-NEXT:    vpshufb %xmm11, %xmm15, %xmm8
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm14[0,1,1,3,4,5,6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,3,3]
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0,1,2,3],xmm8[4],xmm10[5],xmm8[6,7]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1,2,3],xmm5[4],xmm8[5],xmm5[6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm5 = mem[0,1,2,3,4],ymm5[5,6,7]
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm12, %xmm8
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm11[0,1,1,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0,1,2,3],xmm8[4],xmm9[5],xmm8[6,7]
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
 ; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
 ; AVX2-SLOW-NEXT:    # ymm8 = mem[0,1,2,3,4],ymm8[5,6,7]
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 96(%rsi)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 32(%rsi)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 64(%rsi)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, (%rsi)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 96(%rdx)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 32(%rdx)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 64(%rdx)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, (%rdx)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 32(%rcx)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 96(%rcx)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, 64(%rcx)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm10, (%rcx)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm2, 96(%r8)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm6, 32(%r8)
-; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX2-SLOW-NEXT:    vmovaps %ymm2, 64(%r8)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm1, (%r8)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm5, 96(%r9)
+; AVX2-SLOW-NEXT:    vpshufb %xmm6, %xmm15, %xmm6
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm14[0,1,1,3,4,5,6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,3,3]
+; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm9[0,1,2,3],xmm6[4],xmm9[5],xmm6[6,7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX2-SLOW-NEXT:    vpblendd $31, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm6 # 32-byte Folded Reload
+; AVX2-SLOW-NEXT:    # ymm6 = mem[0,1,2,3,4],ymm6[5,6,7]
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 96(%rsi)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 32(%rsi)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 64(%rsi)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, (%rsi)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 96(%rdx)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 32(%rdx)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 64(%rdx)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, (%rdx)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 32(%rcx)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 96(%rcx)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, 64(%rcx)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm9, (%rcx)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm1, 96(%r8)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm7, 32(%r8)
+; AVX2-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX2-SLOW-NEXT:    vmovaps %ymm1, 64(%r8)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm2, (%r8)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm4, 96(%r9)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm13, 32(%r9)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm9, 64(%r9)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm10, 64(%r9)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm3, (%r9)
 ; AVX2-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX2-SLOW-NEXT:    vmovdqa %ymm8, 96(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm7, 32(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm4, 64(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm6, 96(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm8, 32(%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm5, 64(%rax)
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm0, (%rax)
-; AVX2-SLOW-NEXT:    addq $1256, %rsp # imm = 0x4E8
+; AVX2-SLOW-NEXT:    addq $1272, %rsp # imm = 0x4F8
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
 ;
@@ -9268,7 +9294,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm20
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[0,2,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm21
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm16
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 544(%rdi), %ymm1
@@ -9279,7 +9305,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm2, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm21
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[2,2,2,2,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,2]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
@@ -9317,7 +9343,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm2, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm29
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm28
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 640(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -9328,8 +9354,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3,4,5],ymm2[6],ymm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u>
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm3, %ymm2, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm29
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
@@ -9392,7 +9418,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, 288(%rdi), %ymm11, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm11, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm13 = ymm0[0,1,2],ymm13[3,4,5,6,7],ymm0[8,9,10],ymm13[11,12,13,14,15]
@@ -9427,13 +9453,13 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3,4,5,6,7],ymm3[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,5,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm28
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm15, %xmm3, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1],xmm0[2],xmm3[3],xmm0[4,5],xmm3[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm3, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm12[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3],xmm3[4,5],xmm4[6],xmm3[7]
@@ -9448,9 +9474,9 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3],xmm2[4,5],xmm3[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
@@ -9458,7 +9484,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm26
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
@@ -9466,21 +9492,22 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm22
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm28
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[2,1,2,0,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm21
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm9 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm9[2,1,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[0,0,0,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm20
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,1,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm20
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm19
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm1
@@ -9491,179 +9518,181 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm2[0,0,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm19
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm18
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm16
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm0[1,2],xmm2[3],xmm0[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm4 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm4, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm17
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm8 = ymm2[0,1],mem[2],ymm2[3,4],mem[5],ymm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,8,9,u,u,0,1,12,13,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm5, %xmm8, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm6[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm2[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm16
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4],xmm7[5,6],xmm6[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm14 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm14 = mem[0,1],ymm2[2],mem[3],ymm2[4],mem[5,6],ymm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm2, %ymm14, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm24
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,5,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm3 = ymm3[0,1],mem[2],ymm3[3,4],mem[5],ymm3[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm3[2,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm15[0,0,0,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm4[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm12[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm13 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm13 = mem[0,1],ymm4[2],mem[3],ymm4[4],mem[5,6],ymm4[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm5, %ymm13, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm22
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,5,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm29, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm29, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm1, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm1, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm24
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm1[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm4[0,3,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm13[0,0,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm12[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1,2],xmm4[3],xmm1[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm3 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[2,1,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,0,0,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm7[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm11[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4],xmm6[5,6],xmm7[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm6, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm4 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm6[0,3,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm10[0,0,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[2,1,2,0,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1,2],xmm6[3],xmm4[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm1[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm2[0,3,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm11[0,0,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm10[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm2[2,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm9[0,0,0,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm3[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm23
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm6 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm6 = mem[0,1],ymm1[2],mem[3],ymm1[4],mem[5,6],ymm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm0[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,3,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm7[0,0,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[2,1,2,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm5 = mem[0,1],ymm2[2],mem[3],ymm2[4],mem[5,6],ymm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm2 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm5, %xmm2, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm5[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm15 = xmm4[0,1,2,3],xmm15[4],xmm4[5,6],xmm15[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,1,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[0,0,0,0,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm3[0,1,2,3,6,5,6,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm1[0,1,2,3],xmm14[4],xmm1[5,6],xmm14[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm4 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm0[0,1,2],ymm15[3,4,5,6,7],ymm0[8,9,10],ymm15[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5,6,7],ymm0[8,9,10],ymm14[11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm23, %zmm29, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm25, %zmm26
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm23, %zmm29, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm24, %zmm25
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,2,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm9[0,1,2,3],xmm1[4],xmm9[5,6],xmm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,1,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm0[0],xmm9[1,2],xmm0[3],xmm9[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm15, %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm15[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,2,0,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm14
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[1,1,1,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,5,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm14[0,1,2,3],xmm2[4],xmm14[5,6],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm28
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm0[0,1,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm14[1,2],xmm2[3],xmm14[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm0[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm12[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm15[1,1,1,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm12[0,1,2,3],xmm2[4],xmm12[5,6],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm12, %ymm13, %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm13[0,1,2],ymm2[3,4,5,6,7],ymm13[8,9,10],ymm2[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,7,4,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm27, %zmm0, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm26, %zmm0, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm28, %zmm29, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm24, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm10[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm11[0,1,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm9[1,1,1,1,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm15[4],xmm8[5,6],xmm15[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm14, %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm14[0,1,2],ymm8[3,4,5,6,7],ymm14[8,9,10],ymm8[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,7,4,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm14 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm14 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm28, %zmm0, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm27, %zmm0, %zmm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm29, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm9, %zmm25, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm12[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm13[0,1,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm8[1,2],xmm1[3],xmm8[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm11[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,2,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm8[4],xmm3[5,6],xmm8[7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm3, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm7[3,1,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm10[0,1,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm7[1,2],xmm3[3],xmm7[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm0[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm4, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm5[0,1,2,3,7,5,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,2,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4],xmm2[5,6],xmm4[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm8[0,1,2,3],xmm2[4],xmm8[5,6],xmm2[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,4,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm29, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm25, %zmm28
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[3,1,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[0,1,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1,2],xmm2[3],xmm6[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm5, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm5[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm12, %ymm1, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm28
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm29, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm24, %zmm28
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm2 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
@@ -9678,10 +9707,10 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[0,3,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm4[0,1,0,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm23
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm22
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm5, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm5, %xmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm18
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5],xmm4[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
@@ -9716,16 +9745,16 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm4[0,1,0,2,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm21
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm6, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm6, %xmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm27
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4],xmm0[5],xmm4[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm6, %ymm5, %ymm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm22
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm26
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4],ymm0[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm25
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm24
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm5 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
@@ -9749,11 +9778,11 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm3 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm10
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm11
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm3[0,3,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm9[0,1,0,2,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm10, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm11, %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4],xmm3[5],xmm5[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm3, %zmm31
@@ -9765,37 +9794,37 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm5, %xmm11
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm5, %xmm10
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[0,3,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[0,1,0,2,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm11[4],xmm1[5],xmm11[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm10[4],xmm1[5],xmm10[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm7, %ymm11
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm7, %ymm10
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3,4],ymm1[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3,4],ymm1[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm16
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm13[1,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm2[0],xmm11[1],xmm2[2,3],xmm11[4],xmm2[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm13[1,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm2[0],xmm10[1],xmm2[2,3],xmm10[4],xmm2[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm1[0,1,1,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,3,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0,1,2,3],xmm0[4],xmm13[5],xmm0[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm11, %zmm0, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm10, %zmm0, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm3, %xmm8, %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm19[1,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm11[1],xmm8[2,3],xmm11[4],xmm8[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm19[1,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm10[1],xmm8[2,3],xmm10[4],xmm8[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm11, %ymm0, %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm10, %ymm0, %ymm13
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $236, %ymm29, %ymm13, %ymm8
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm8, %zmm1 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm0
@@ -9805,31 +9834,31 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,3,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm13[0,1,2,3],xmm8[4],xmm13[5],xmm8[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm0, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm8[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm11, %ymm8, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm3, %xmm15, %xmm11
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm10, %ymm8, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm3, %xmm15, %xmm10
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[1,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm14[1],xmm11[2,3],xmm14[4],xmm11[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $236, %ymm29, %ymm8, %ymm11
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm14[1],xmm10[2,3],xmm14[4],xmm10[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $236, %ymm29, %ymm8, %ymm10
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm3, %xmm6, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm12[1,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1],xmm3[2,3],xmm6[4],xmm3[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm25, %zmm0, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm6
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm8
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm11, %xmm11
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,1,1,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm10[4],xmm9[5],xmm10[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3],xmm11[4],xmm9[5],xmm11[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm9, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm11, %zmm3 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm10, %zmm3 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm7, %ymm7
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,1,3,4,5,6,7]
@@ -9866,11 +9895,11 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm31, %zmm4, %zmm8
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm4, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm4, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, 64(%rcx)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, 64(%rcx)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm1, (%rcx)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, 64(%r8)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, (%r8)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, (%r8)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 64(%r9)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, (%r9)
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
@@ -9882,7 +9911,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512F-ONLY-FAST-LABEL: load_i16_stride6_vf64:
 ; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $1384, %rsp # imm = 0x568
+; AVX512F-ONLY-FAST-NEXT:    subq $1512, %rsp # imm = 0x5E8
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,12,13,u,u,4,5,u,u,u,u,12,13,14,15>
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 608(%rdi), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -9902,11 +9931,11 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 512(%rdi), %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm15, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,4,5,u,u,u,u,8,9,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm15, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,1,4,5,4,5,u,u,0,1,12,13,u,u,4,5>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm22
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
@@ -9916,9 +9945,9 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm12, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm14, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[2,1,0,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm23
@@ -9938,11 +9967,11 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 704(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm9, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm2, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm27
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm12, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm25
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 640(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -9953,8 +9982,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3,4,5],ymm2[6],ymm1[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm2, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm29
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,6]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
@@ -9972,11 +10001,11 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 160(%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm29
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm5, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm6, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm6, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3],xmm4[4,5],xmm1[6],xmm4[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
@@ -9994,36 +10023,36 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm4[2,3],mem[2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm4[0],ymm1[1],ymm4[2,3,4,5],ymm1[6],ymm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm28
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm31
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm11, %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 352(%rdi), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%rdi), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm8
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm8, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2],xmm3[3],xmm14[4,5],xmm3[6],xmm14[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm3[0,1,2],xmm9[3],xmm3[4,5],xmm9[6],xmm3[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, 288(%rdi), %ymm3, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5,6,7],ymm0[8,9,10],ymm14[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm0[0,1,2],ymm9[3,4,5,6,7],ymm0[8,9,10],ymm9[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm9[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm13, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <2,3,14,15,u,u,6,7,u,u,u,u,12,13,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm13, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,2,3,u,u,10,11,14,15,u,u,u,u>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm7, %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm0[0,1],xmm7[2],xmm0[3],xmm7[4,5],xmm0[6,7]
@@ -10034,15 +10063,15 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm5, %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm2, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm10, %xmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2],xmm2[3],xmm5[4,5],xmm2[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [2,3,14,15,10,11,10,11,14,15,10,11,10,11,6,7,18,19,30,31,26,27,26,27,30,31,26,27,26,27,22,23]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm11, %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm6[3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm8, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <2,3,14,15,10,11,10,11,14,15,10,11,u,u,6,7,18,19,30,31,26,27,26,27,30,31,26,27,u,u,22,23>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm3
@@ -10050,9 +10079,9 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7],ymm3[8,9,10],ymm1[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,5,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm30
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm3, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6,7]
@@ -10065,26 +10094,26 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm12, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm14, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm23, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm4, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3],xmm4[4,5],xmm3[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm29, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm27, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm25, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm9[0,1,2,3,5,5,5,5]
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm12[0,1,2,3,5,5,5,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5],xmm2[6],xmm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm28
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm0[2,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,0,1,4,5,u,u,12,13,12,13,12,13,12,13>
@@ -10092,23 +10121,23 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm14, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[2,1,2,0,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm29, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[2,1,0,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,0,1,0,1,0,1,u,u,8,9,12,13,u,u>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm22
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm24
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[0,1,2,1]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,6,5,6,4]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm22
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u>
@@ -10118,17 +10147,17 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm5[2,1,2,0,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm21
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm28, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm30, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm5[0,1],ymm2[2],ymm5[3],ymm2[4],ymm5[5,6],ymm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm31, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm6 = mem[0,1],ymm2[2],mem[3],ymm2[4],mem[5,6],ymm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm19
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu (%rsp), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm6
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm1[2,1,0,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm7, %xmm1
@@ -10139,24 +10168,25 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm6[4],xmm1[5,6],xmm6[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $107, (%rsp), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3],ymm1[4],mem[5,6],ymm1[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm8, %ymm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm8, %ymm26
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm16
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,5,4]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm27, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm26 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm26, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm29, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm4, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm25
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm4 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm4 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm5[0,3,2,1]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
@@ -10190,8 +10220,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm2[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,1,0,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm1
@@ -10201,15 +10231,15 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3],ymm1[4],mem[5,6],ymm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm11
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm15 = ymm11[0,1,2],ymm15[3,4,5,6,7],ymm11[8,9,10],ymm15[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,6,5,4]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm15[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm23, %zmm27, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm26, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm23, %zmm29, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm25, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm14, %xmm0
@@ -10217,9 +10247,9 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm13[3,1,2,1,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm0[1,2],xmm11[3],xmm0[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,3,2,3,2,3,2,3,u,u,10,11,14,15,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm22, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm15
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm24, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm22, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm0[0,1,2,3,7,5,6,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0,1,2,3],xmm14[4],xmm15[5,6],xmm14[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
@@ -10248,16 +10278,19 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm14[0,1,2],ymm11[3,4,5,6,7],ymm14[8,9,10],ymm11[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,7,4,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm14[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm24, %zmm27, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm26, %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm24, %zmm29, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm25, %zmm26
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm23, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm11 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm11 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm28, %zmm0, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[3,1,2,1,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm10[0],xmm0[1,2],xmm10[3],xmm0[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm8, %xmm8
@@ -10280,215 +10313,221 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3,4,5,6,7],ymm1[8,9,10],ymm2[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm27, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm14, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm29, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm25, %zmm28
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm11 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm11, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm29, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm10 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm10, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm2[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm3, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm5, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm4, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm27
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm28, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0],ymm2[1],ymm1[2,3,4,5],ymm2[6],ymm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm12 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm12 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm14[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm22
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm27, %ymm3, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm30
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm31, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm4 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm11 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm11, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm3, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    movw $31, %ax
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm2, %zmm29 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm4 = ymm2[0],mem[1],ymm2[2,3,4,5],mem[6],ymm2[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu (%rsp), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm2 = ymm2[0],mem[1],ymm2[2,3],mem[4],ymm2[5,6],mem[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm20
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm30 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, (%rsp), %ymm0, %ymm4 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm4 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm0 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm21
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm5, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm23
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [0,1,2,3,4,9,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm4[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm2, %ymm28, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm4 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm4 = ymm2[0],mem[1],ymm2[2,3,4,5],mem[6],ymm2[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm15 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm15, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4],xmm3[5],xmm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm4 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm14 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm14, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm5[2,2,2,2,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm19
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm27, %ymm1, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm5 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm1[2,2,2,2,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm1, %xmm17
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm3 = ymm1[0],mem[1],ymm1[2,3],mem[4],ymm1[5,6],mem[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm3[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm6, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm4, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm16
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm7, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5],xmm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm2, %zmm30 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm5 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[2,2,2,2,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm0, %xmm17
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm8 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm8 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm2 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm2[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm13, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm9, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4],xmm4[5],xmm2[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm31 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm7 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm7 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm0 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm6, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[0,3,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4],xmm0[5],xmm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm8[8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm2, %ymm28, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm14, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm12, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm12[0],xmm9[1],xmm12[2,3],xmm9[4],xmm12[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm6, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[0,3,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm4, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm8[4],xmm1[5],xmm8[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3,4],ymm1[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm12, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm11, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm11[0],xmm8[1],xmm11[2,3],xmm8[4],xmm11[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm22
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm27, %ymm12, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm13, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm11[0],xmm12[1],xmm11[2,3],xmm12[4],xmm11[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm24, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm31, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm13, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0,1,2,3],xmm0[4],xmm13[5],xmm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm11, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm10[0],xmm11[1],xmm10[2,3],xmm11[4],xmm10[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm25, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm27, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm15[0,1,2,3],xmm0[4],xmm15[5],xmm0[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm9, %zmm31 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm8, %zmm2 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm23, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm9[0,1,2,3],xmm1[4],xmm9[5],xmm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm0, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm1, %ymm28, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm8[0,1,2,3],xmm1[4],xmm8[5],xmm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm15[0,1,2,3,4],ymm1[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm15
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm15, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm15[0],xmm0[1],xmm15[2,3],xmm0[4],xmm15[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm27, %ymm1, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm17, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm5, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm16, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm7, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4],xmm5[5],xmm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm8, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm6, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm6[4],xmm4[5],xmm6[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm4, %ymm28, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm14, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm14[0],xmm0[1],xmm14[2,3],xmm0[4],xmm14[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm15, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm17, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm14, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm5, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0],xmm3[1],xmm5[2,3],xmm3[4],xmm5[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm13, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm9, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm9[0,1,2,3],xmm5[4],xmm9[5],xmm5[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm3, %zmm5, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm3 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm7, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm6, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm5[4],xmm4[5],xmm5[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm6 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    movw $-2048, %ax # imm = 0xF800
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm8, %zmm6 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, (%rsi)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm6 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm8, %zmm6 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 64(%rsi)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm6, %zmm5 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, (%rsi)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm6, %zmm5 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 64(%rsi)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm6 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm5, %zmm6 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 64(%rdx)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm6, %zmm5 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 64(%rdx)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm2, %zmm5 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm5 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, (%rdx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm29, %zmm2, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm30, %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm31, %zmm2, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm1, 64(%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm1, (%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, 64(%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, (%r8)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 64(%r9)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, (%r9)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm30, %zmm4, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm31, %zmm4, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm4, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm4, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm2, 64(%rcx)
+; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm2, (%rcx)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, 64(%r8)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, (%r8)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 64(%r9)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, (%r9)
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $1384, %rsp # imm = 0x568
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
+; AVX512F-ONLY-FAST-NEXT:    addq $1512, %rsp # imm = 0x5E8
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
@@ -10707,20 +10746,21 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm1[0,0,2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm20
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,6,6,6]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[2,1,2,0,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm22
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm9 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm9 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm9[2,1,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[2,1,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[0,0,0,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm23
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[0,1,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,6,5,6,4]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm23
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm24
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5,6],xmm2[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm1
@@ -10731,10 +10771,10 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm2[0,0,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm24
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm25
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,1,2,0,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm25
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm16
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm0[1,2],xmm2[3],xmm0[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm3
@@ -10743,159 +10783,161 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm4, %ymm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm17
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm8 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm8 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,8,9,u,u,0,1,12,13,u,u>
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm4, %xmm8, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm5[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,6,5,6,4]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm16
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4],xmm6[5,6],xmm5[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm6[0,1],ymm2[2],ymm6[3],ymm2[4],ymm6[5,6],ymm2[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm2, %ymm14, %ymm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm27
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3,4,5,6,7],ymm6[8,9,10],ymm5[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,6,5,4]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm3 = mem[0,1],ymm3[2],mem[3,4],ymm3[5],mem[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm3[2,1,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm15[0,0,0,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm4[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm12[0,1,2,3,6,5,6,4]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5,6],xmm4[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm5
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm5[0,1],ymm4[2],ymm5[3],ymm4[4],ymm5[5,6],ymm4[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm5, %ymm13, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm27
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,5,4]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm21, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm21, %zmm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm1, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm1, %zmm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm1[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm3[0,3,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm13[0,0,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm12[2,1,2,0,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm1[1,2],xmm3[3],xmm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm7 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm7 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm7[2,1,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,0,0,0,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm6[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm11[0,1,2,3,6,5,6,4]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4],xmm5[5,6],xmm6[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm5, %zmm19
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm3 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm5[0,3,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm10[0,0,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm6[2,1,2,0,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0],xmm3[1,2],xmm5[3],xmm3[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm1[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm2[0,3,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm11[0,0,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm10[2,1,2,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm2 = mem[0,1],ymm2[2],mem[3,4],ymm2[5],mem[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm2[2,1,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm9[0,0,0,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm3[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,6,5,6,4]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5,6],xmm3[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm19
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm5 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm5 = mem[0,1],ymm1[2],mem[3],ymm1[4],mem[5,6],ymm1[7]
+; AVX512DQ-SLOW-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,3,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm7[0,0,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,3,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[2,1,2,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm5 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm5 = mem[0,1],ymm2[2],mem[3],ymm2[4],mem[5,6],ymm2[7]
 ; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm0[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm2 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm4, %xmm2, %xmm3
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm4[0,1,2,3,6,5,6,4]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm15 = xmm3[0,1,2,3],xmm15[4],xmm3[5,6],xmm15[7]
+; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm1 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,1,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[0,0,0,0,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm3[0,1,2,3,6,5,6,4]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm1[0,1,2,3],xmm14[4],xmm1[5,6],xmm14[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm3 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512DQ-SLOW-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm1 = mem[0,1],ymm0[2],mem[3],ymm0[4],mem[5,6],ymm0[7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm0[0,1,2],ymm15[3,4,5,6,7],ymm0[8,9,10],ymm15[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm0[0,1,2],ymm14[3,4,5,6,7],ymm0[8,9,10],ymm14[11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,5,4]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm14[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm28
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm19, %zmm21, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm18, %zmm28
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm19, %zmm21, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm18, %zmm28
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm0
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,2,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm9[0,1,2,3],xmm1[4],xmm9[5,6],xmm1[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,5]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm14
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[1,1,1,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,5,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm14[0,1,2,3],xmm2[4],xmm14[5,6],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm20
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[3,1,2,1,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm9
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,1,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm0[0],xmm9[1,2],xmm0[3],xmm9[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm15
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm15, %ymm15
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm15[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm15[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm15
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,7,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,2,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm0[0,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm14[1,2],xmm2[3],xmm14[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [6,7,2,3,14,15,14,15,14,15,2,3,14,15,10,11,22,23,18,19,30,31,30,31,30,31,18,19,30,31,26,27]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm0, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm0[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm12[0,1,2,3,7,5,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm15[1,1,1,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm12[0,1,2,3],xmm2[4],xmm12[5,6],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm12, %ymm13, %ymm13
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm13[0,1,2],ymm2[3,4,5,6,7],ymm13[8,9,10],ymm2[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,7,4,5]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm27
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm20, %zmm21, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm18, %zmm27
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm10[3,1,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm11[0,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,7,5,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm9[1,1,1,1,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm15[4],xmm8[5,6],xmm15[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = <6,7,2,3,14,15,14,15,14,15,10,11,u,u,2,3,22,23,18,19,30,31,30,31,30,31,26,27,u,u,18,19>
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm15, %ymm14, %ymm14
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm14[0,1,2],ymm8[3,4,5,6,7],ymm14[8,9,10],ymm8[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,7,4,5]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm27
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm21, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm9, %zmm18, %zmm27
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm12[3,1,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm13[0,1,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm8[1,2],xmm1[3],xmm8[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm11[0,1,2,3,7,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,2,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm8[4],xmm7[5,6],xmm8[7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm7, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[3,1,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm10[0,1,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm7[1,2],xmm6[3],xmm7[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3,4],xmm0[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm15, %ymm3, %ymm3
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,2,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4],xmm2[5,6],xmm4[7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm8[0,1,2,3],xmm2[4],xmm8[5,6],xmm2[7]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,4,5]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm20
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm21, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm18, %zmm20
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[3,1,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[0,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm6[1,2],xmm2[3],xmm6[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm5, %ymm5
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm5[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm12, %ymm1, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,1,1,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3,4,5,6,7],ymm1[8,9,10],ymm3[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm20
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm21, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm18, %zmm20
 ; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    # ymm3 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
@@ -11165,7 +11207,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0],ymm5[1],ymm2[2,3,4,5],ymm5[6],ymm2[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <0,1,12,13,8,9,12,13,8,9,12,13,4,5,u,u,16,17,28,29,24,25,28,29,24,25,28,29,20,21,u,u>
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm5, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm26
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm27
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm18
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3,4,5,6,7],ymm2[8,9,10],ymm1[11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,6]
@@ -11189,7 +11231,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa 160(%rdi), %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm2
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm29
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm30
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm31
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm5, %xmm1
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm10
@@ -11209,16 +11251,16 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2],xmm0[3],xmm4[4,5],xmm0[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %ymm4
 ; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm4[2,3],mem[2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm4, %ymm4
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm4[0],ymm1[1],ymm4[2,3,4,5],ymm1[6],ymm4[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm27
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm11, %ymm4
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm0[0,1,2],ymm4[3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 352(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 320(%rdi), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm8, %xmm13
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm4
@@ -11226,11 +11268,11 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm3[0,1,2],xmm13[3],xmm3[4,5],xmm13[6],xmm3[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm3
 ; AVX512DQ-FAST-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, 288(%rdi), %ymm3, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2,3,4,5],ymm1[6],ymm0[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm26
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm0[0,1,2],ymm13[3,4,5,6,7],ymm0[8,9,10],ymm13[11,12,13,14,15]
@@ -11310,7 +11352,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm14, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[2,1,2,0,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
@@ -11325,8 +11367,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm4
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm1 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,0,1,4,5,u,u,12,13,u,u,u,u,u,u>
@@ -11336,7 +11378,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm5[2,1,2,0,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm23
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm6 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # ymm6 = mem[0,1],ymm2[2],mem[3],ymm2[4],mem[5,6],ymm2[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [4,5,0,1,12,13,14,15,8,9,0,1,12,13,8,9,20,21,16,17,28,29,30,31,24,25,16,17,28,29,24,25]
@@ -11344,8 +11386,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm25
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu (%rsp), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $219, (%rsp), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm1[2,1,0,3]
@@ -11356,7 +11398,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm7, %xmm17
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm6[4],xmm1[5,6],xmm6[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm6
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd $107, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2],mem[3],ymm1[4],mem[5,6],ymm1[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <4,5,0,1,12,13,14,15,8,9,12,13,0,1,u,u,20,21,16,17,28,29,30,31,24,25,28,29,16,17,u,u>
@@ -11369,12 +11411,13 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm21, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm26 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm26, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm4, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm27
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm4 = mem[0,1],ymm1[2],mem[3,4],ymm1[5],mem[6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm4 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm4 = ymm1[0,1],mem[2],ymm1[3,4],mem[5],ymm1[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm5[0,3,2,1]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
@@ -11393,8 +11436,8 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm4, %zmm19
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm0 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm0 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm4
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[0,3,2,1]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm7, %xmm3
@@ -11427,7 +11470,7 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm15[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm2
 ; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm19, %zmm21, %zmm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm26, %zmm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm27, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,2,3,6,7,u,u,14,15,14,15,14,15,14,15>
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm14, %xmm0
@@ -11466,9 +11509,9 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm14[0,1,2],ymm11[3,4,5,6,7],ymm14[8,9,10],ymm11[11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,7,4,5]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm14[0,1,2,3],ymm11[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm30
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm29
 ; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm20, %zmm21, %zmm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm26, %zmm30
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm27, %zmm29
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[3,1,2,1,4,5,6,7]
@@ -11495,194 +11538,197 @@ define void @load_i16_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm20
 ; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm21, %zmm5
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm26, %zmm20
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm27, %zmm20
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm9 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm10 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <8,9,u,u,0,1,12,13,u,u,12,13,12,13,14,15>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm9, %xmm0
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm14
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm14[2,2,2,2,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm10, %xmm0
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm15
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm15[2,2,2,2,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5,6],ymm3[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm4, %xmm31
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm4
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4],xmm4[5],xmm3[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,u,u,8,9,u,u,0,1,12,13>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm3, %xmm31
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,2,3,0,1,4,5,u,u,12,13,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm30
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm22
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # ymm5 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm12 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm12 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm12, %xmm0
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm13
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm13[2,2,2,2,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm27
-; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm4, %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm11 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm11 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm11, %xmm0
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm13
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[2,2,2,2,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3],xmm2[4],xmm0[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,4,5,0,1,12,13,24,25,20,21,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm18
+; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm28, %ymm3, %ymm0
 ; AVX512DQ-FAST-NEXT:    movw $31, %ax
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm0, %zmm0, %zmm23 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm0, %zmm0, %zmm22 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # ymm5 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
-; AVX512DQ-FAST-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $146, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd $146, (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # ymm0 = ymm0[0],mem[1],ymm0[2,3],mem[4],ymm0[5,6],mem[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm4, %xmm21
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm6, %xmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm6, %xmm22
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4],xmm4[5],xmm0[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [0,1,2,3,4,9,10,11]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm3, %xmm19
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm6, %xmm27
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4],xmm3[5],xmm0[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [8,9,4,5,4,5,6,7,0,1,4,5,0,1,12,13,24,25,20,21,20,21,22,23,16,17,20,21,16,17,28,29]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm5, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm26
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm18
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm28, %ymm4
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm24
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm5, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm25
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm21
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm23
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # ymm5 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm15 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm15 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm15, %xmm0
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm6
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm6[2,2,2,2,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm14 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm14 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm14, %xmm0
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm6[2,2,2,2,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm6, %xmm17
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm0[0],xmm4[1],xmm0[2,3],xmm4[4],xmm0[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm16
-; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm0, %ymm4
+; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm28, %ymm0, %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm6 = ymm0[0,1],mem[2],ymm0[3,4],mem[5],ymm0[6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd $219, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm6 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm6 = mem[0,1],ymm0[2],mem[3,4],ymm0[5],mem[6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm11
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm11[2,2,2,2,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5,6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm12
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm12[2,2,2,2,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm3 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm3 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm8
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm3[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm8, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm5
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4],xmm5[5],xmm3[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm3, %zmm25
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm4, %zmm0, %zmm25 {%k1}
+; AVX512DQ-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm2 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm8
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm8, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm7, %xmm5
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4],xmm5[5],xmm2[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm24
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm3, %zmm0, %zmm24 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpblendd $66, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # ymm5 = ymm0[0],mem[1],ymm0[2,3,4,5],mem[6],ymm0[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpblendd $109, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # ymm1 = mem[0],ymm0[1],mem[2,3],ymm0[4],mem[5,6],ymm0[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm1[0,3,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm3, %xmm1
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm2
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm28, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm26
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[0,3,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm1
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4],xmm1[5],xmm4[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm4
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm25
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,6,7,u,u,u,u,10,11,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm13, %xmm10
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm13, %xmm9
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <10,11,u,u,2,3,14,15,u,u,10,11,12,13,14,15>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm12, %xmm12
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm12[0],xmm10[1],xmm12[2,3],xmm10[4],xmm12[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm19
-; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm12, %ymm10
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm14, %xmm12
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm9, %xmm9
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm9[0],xmm12[1],xmm9[2,3],xmm12[4],xmm9[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm31, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm14, %xmm14
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm14[0,1,2,3],xmm2[4],xmm14[5],xmm2[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm27
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm10, %zmm0, %zmm27 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm21, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm22, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm10
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm10[0,1,2,3],xmm2[4],xmm10[5],xmm2[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm11, %xmm11
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm11[0],xmm9[1],xmm11[2,3],xmm9[4],xmm11[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,6,7,2,3,14,15,26,27,22,23,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm14
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm2, %ymm28, %ymm14
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm2
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm11
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm18
+; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm28, %ymm11, %ymm9
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm15, %xmm11
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm10, %xmm10
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm10[0],xmm11[1],xmm10[2,3],xmm11[4],xmm10[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,u,u,u,u,10,11,u,u,2,3,14,15>
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm31, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <0,1,2,3,2,3,6,7,u,u,14,15,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm30, %xmm15
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm15, %xmm15
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm15[0,1,2,3],xmm4[4],xmm15[5],xmm4[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm4, %zmm26
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm9, %zmm0, %zmm26 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm27, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm9
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm9[0,1,2,3],xmm4[4],xmm9[5],xmm4[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [10,11,6,7,4,5,6,7,6,7,6,7,2,3,14,15,26,27,22,23,20,21,22,23,22,23,22,23,18,19,30,31]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm0, %ymm15
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm15[0,1,2,3,4],ymm4[5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm14
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm0, %ymm14
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm15
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm15
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm17, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm15, %xmm15
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm15[0],xmm0[1],xmm15[2,3],xmm0[4],xmm15[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm29, %ymm14, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm11, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm14, %xmm14
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm14[0],xmm0[1],xmm14[2,3],xmm0[4],xmm14[5,6,7]
+; AVX512DQ-FAST-NEXT:    vpternlogq $236, %ymm28, %ymm15, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm12, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm6, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0],xmm1[1],xmm6[2,3],xmm1[4],xmm6[5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm8, %xmm6
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm7, %xmm7
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm8, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm7, %xmm7
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4],xmm7[5],xmm6[6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
 ; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm6, %zmm1
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $0, %ymm0, %zmm0, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm5, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm3, %xmm3
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4],xmm3[5],xmm4[6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm3, %ymm28, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm5, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4],xmm2[5],xmm3[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm3, (%rsi)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm3, 64(%rsi)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm3, 64(%rdx)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm3, (%rdx)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm23, %zmm3, %zmm24
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm25, %zmm3, %zmm26
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm27, %zmm3, %zmm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm3, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, (%rsi)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, 64(%rsi)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, 64(%rdx)
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm2, (%rdx)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm22, %zmm2, %zmm23
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm24, %zmm2, %zmm25
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm26, %zmm2, %zmm4
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm2, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm1, 64(%rcx)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm1, (%rcx)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, 64(%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, (%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, 64(%r9)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, (%r9)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, (%r8)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, 64(%r9)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, (%r9)
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, 64(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, (%rax)
 ; AVX512DQ-FAST-NEXT:    addq $936, %rsp # imm = 0x3A8
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
index ce4ce2bdac403..abe5aa583f962 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
@@ -1866,20 +1866,19 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX1-ONLY-LABEL: load_i16_stride7_vf16:
 ; AVX1-ONLY:       # %bb.0:
-; AVX1-ONLY-NEXT:    subq $264, %rsp # imm = 0x108
-; AVX1-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vpsrld $16, %xmm1, %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, %xmm5
-; AVX1-ONLY-NEXT:    vmovdqa 160(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, %xmm7
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    subq $248, %rsp
+; AVX1-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpsrld $16, %xmm0, %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa 160(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm12[2,2,3,3]
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX1-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vpsrlq $16, %xmm12, %xmm1
-; AVX1-ONLY-NEXT:    vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpsrlq $16, %xmm1, %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa 144(%rdi), %xmm6
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa 208(%rdi), %xmm2
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm2, (%rsp) # 16-byte Spill
@@ -1891,8 +1890,8 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa 80(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
 ; AVX1-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm1[0,0,0,0]
@@ -1904,18 +1903,18 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm9, %ymm9
 ; AVX1-ONLY-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm4[2,2,3,3]
+; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm10
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm10[2,2,3,3]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm10, %xmm11
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm14 = xmm1[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,7,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm15 = xmm14[2],xmm13[2],xmm14[3],xmm13[3]
-; AVX1-ONLY-NEXT:    vmovaps 32(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps 32(%rdi), %xmm5
 ; AVX1-ONLY-NEXT:    vmovaps 48(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vinsertps {{.*#+}} xmm10 = zero,xmm1[2],xmm2[2],zero
+; AVX1-ONLY-NEXT:    vinsertps {{.*#+}} xmm10 = zero,xmm5[2],xmm2[2],zero
 ; AVX1-ONLY-NEXT:    vmovaps %xmm2, %xmm14
 ; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, %xmm13
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm15[0,1,2],xmm10[3,4],xmm15[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm15 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
 ; AVX1-ONLY-NEXT:    vandnps %ymm9, %ymm15, %ymm9
@@ -1924,12 +1923,13 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3,4],ymm0[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm12, %xmm13
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm12[0,1,2,3,4,5],xmm6[6],xmm12[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm3[0,1,2,3,4,5],xmm6[6],xmm3[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,0,3,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm9[0,1,2,3],xmm0[4,5,6,7]
@@ -1941,70 +1941,65 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpsrld $16, %xmm1, %xmm9
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm10 = xmm3[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, %xmm11
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm10 = xmm7[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpslldq {{.*#+}} xmm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpslldq {{.*#+}} xmm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm6[0,1,2,3,4,5]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,6],xmm10[7]
 ; AVX1-ONLY-NEXT:    vpsrld $16, %xmm8, %xmm10
-; AVX1-ONLY-NEXT:    vmovdqa %xmm8, %xmm3
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm10 = xmm13[4],xmm14[4],xmm13[5],xmm14[5],xmm13[6],xmm14[6],xmm13[7],xmm14[7]
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm10 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
 ; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[8,9,8,9,8,9,8,9,6,7,6,7,6,7,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm15 = xmm4[0],xmm0[1],xmm4[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm4, %xmm7
-; AVX1-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm15 = xmm11[0],xmm0[1],xmm11[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm11, %xmm5
+; AVX1-ONLY-NEXT:    vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm15[1,0,3,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm15[0,1,2],xmm10[3,4],xmm15[5,6,7]
-; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm4 = [65535,65535,65535,65535,65535,0,0,0,0,65535,65535,65535,65535,65535,65535,65535]
-; AVX1-ONLY-NEXT:    vandnps %ymm9, %ymm4, %ymm9
-; AVX1-ONLY-NEXT:    vandps %ymm4, %ymm10, %ymm10
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, %ymm14
+; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,0,0,0,0,65535,65535,65535,65535,65535,65535,65535]
+; AVX1-ONLY-NEXT:    vandnps %ymm9, %ymm11, %ymm9
+; AVX1-ONLY-NEXT:    vandps %ymm11, %ymm10, %ymm10
 ; AVX1-ONLY-NEXT:    vorps %ymm9, %ymm10, %ymm9
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm10 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
-; AVX1-ONLY-NEXT:    vandnps %ymm10, %ymm4, %ymm10
-; AVX1-ONLY-NEXT:    vandps %ymm4, %ymm9, %ymm9
+; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm11 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
+; AVX1-ONLY-NEXT:    vandnps %ymm10, %ymm11, %ymm10
+; AVX1-ONLY-NEXT:    vandps %ymm11, %ymm9, %ymm9
 ; AVX1-ONLY-NEXT:    vorps %ymm10, %ymm9, %ymm9
 ; AVX1-ONLY-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vpsllq $16, %xmm5, %xmm9
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm4[4],xmm9[4],xmm4[5],xmm9[5],xmm4[6],xmm9[6],xmm4[7],xmm9[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm6[0,3,2,3]
+; AVX1-ONLY-NEXT:    vpsllq $16, %xmm4, %xmm9
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm13[4],xmm9[4],xmm13[5],xmm9[5],xmm13[6],xmm9[6],xmm13[7],xmm9[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm15[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,1,0,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm6[0,1],xmm10[2,3],xmm6[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm3[0,1],xmm10[2,3],xmm3[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1,2,3],xmm9[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm10 = xmm12[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; AVX1-ONLY-NEXT:    vmovdqa %xmm12, %xmm13
+; AVX1-ONLY-NEXT:    vmovdqa %xmm12, %xmm4
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm2[0],xmm10[1],xmm2[1],xmm10[2],xmm2[2],xmm10[3],xmm2[3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, %xmm8
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5],xmm10[6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm1[2,2,2,2]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3,4,5],xmm10[6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm11, %xmm15
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm5[0,1,0,1]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, %xmm14
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm7[0,1,2,3,4,5],xmm10[6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm6, %xmm3
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm6[0,1,0,1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,6],xmm11[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm3[1,1,1,1]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm8[1,1,1,1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm10, %ymm10
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm3[0,1,0,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm6[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,4,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm12 = xmm2[2,2,3,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm12 = xmm8[2,2,3,3]
 ; AVX1-ONLY-NEXT:    vpunpckhqdq {{.*#+}} xmm11 = xmm11[1],xmm12[1]
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm12[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm11[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vandnps %ymm10, %ymm14, %ymm10
-; AVX1-ONLY-NEXT:    vandps %ymm1, %ymm14, %ymm1
-; AVX1-ONLY-NEXT:    vmovaps %ymm14, %ymm7
+; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm0 = [65535,65535,65535,65535,65535,0,0,0,0,65535,65535,65535,65535,65535,65535,65535]
+; AVX1-ONLY-NEXT:    vandnps %ymm10, %ymm0, %ymm10
+; AVX1-ONLY-NEXT:    vandps %ymm0, %ymm1, %ymm1
 ; AVX1-ONLY-NEXT:    vorps %ymm1, %ymm10, %ymm1
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm9
 ; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
@@ -2012,38 +2007,37 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vandps %ymm0, %ymm1, %ymm1
 ; AVX1-ONLY-NEXT:    vorps %ymm1, %ymm9, %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm14[0],xmm6[1],xmm14[1],xmm6[2],xmm14[2],xmm6[3],xmm14[3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm15[0],xmm5[1],xmm15[1],xmm5[2],xmm15[2],xmm5[3],xmm15[3]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,3,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm4[0,1,2,3,4,5],xmm0[6],xmm4[7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm13[0,1,2,3,4,5],xmm0[6],xmm13[7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,0,0,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,7,6,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm9[0],xmm1[1,2],xmm9[3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm13[4],xmm8[4],xmm13[5],xmm8[5],xmm13[6],xmm8[6],xmm13[7],xmm8[7]
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm9[2,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,0,0,0]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5],xmm10[6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm12[0,1,2,3,6,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,2,2,2]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm2[0],xmm3[1],xmm2[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, %xmm13
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm8[0],xmm6[1],xmm8[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[0,1,1,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,7,7,7,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1],xmm11[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm11 = xmm15[4],xmm3[4],xmm15[5],xmm3[5],xmm15[6],xmm3[6],xmm15[7],xmm3[7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm11 = xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm7, %xmm8
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,4,7,7]
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm11 = xmm11[4],xmm5[4],xmm11[5],xmm5[5],xmm11[6],xmm5[6],xmm11[7],xmm5[7]
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm11 = xmm11[4],xmm3[4],xmm11[5],xmm3[5],xmm11[6],xmm3[6],xmm11[7],xmm3[7]
 ; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,u,u,u,u,4,5,8,9,2,3]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm4, %xmm12
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm7, %xmm12
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm12, %ymm11, %ymm11
-; AVX1-ONLY-NEXT:    vandps %ymm7, %ymm10, %ymm10
-; AVX1-ONLY-NEXT:    vandnps %ymm11, %ymm7, %ymm11
+; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,0,0,0,0,65535,65535,65535,65535,65535,65535,65535]
+; AVX1-ONLY-NEXT:    vandps %ymm2, %ymm10, %ymm10
+; AVX1-ONLY-NEXT:    vandnps %ymm11, %ymm2, %ymm11
 ; AVX1-ONLY-NEXT:    vorps %ymm11, %ymm10, %ymm10
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
@@ -2051,15 +2045,16 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vandps %ymm2, %ymm10, %ymm10
 ; AVX1-ONLY-NEXT:    vorps %ymm1, %ymm10, %ymm1
 ; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa %xmm6, %xmm7
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm6[1,1,1,1]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm14, %xmm5
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm14[2,3],xmm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm5, %xmm6
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm5[1,1,1,1]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm15, %xmm4
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm15[2,3],xmm1[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm0[0,3,2,3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, %xmm8
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, %xmm14
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,1,0,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm10 = xmm10[0],xmm6[0],xmm10[1],xmm6[1]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm13, %xmm5
+; AVX1-ONLY-NEXT:    vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm10[0],xmm1[1,2],xmm10[3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,4,7]
@@ -2069,54 +2064,54 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm0[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpsrlq $16, %xmm13, %xmm10
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm10[2,3],xmm9[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm15[0,1,0,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm8[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,4,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, %xmm12
-; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm10 = xmm10[2],xmm3[2],xmm10[3],xmm3[3]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm10 = xmm10[2],xmm12[2],xmm10[3],xmm12[3]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm10 = xmm10[4],xmm3[4],xmm10[5],xmm3[5],xmm10[6],xmm3[6],xmm10[7],xmm3[7]
 ; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[u,u,u,u,u,u,u,u,0,1,4,5,8,9,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm4[2,3,2,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm7[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm10, %ymm10
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
-; AVX1-ONLY-NEXT:    vandnps %ymm1, %ymm4, %ymm1
-; AVX1-ONLY-NEXT:    vandps %ymm4, %ymm9, %ymm9
+; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm7 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
+; AVX1-ONLY-NEXT:    vandnps %ymm1, %ymm7, %ymm1
+; AVX1-ONLY-NEXT:    vandps %ymm7, %ymm9, %ymm9
 ; AVX1-ONLY-NEXT:    vorps %ymm1, %ymm9, %ymm1
 ; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3]
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3]
 ; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[8,9,8,9,8,9,8,9,6,7,6,7,6,7,6,7]
-; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm5, %xmm9
-; AVX1-ONLY-NEXT:    vmovdqa %xmm5, %xmm15
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm10 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX1-ONLY-NEXT:    vmovdqa %xmm7, %xmm5
+; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm4, %xmm9
+; AVX1-ONLY-NEXT:    vmovdqa %xmm4, %xmm7
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm10 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX1-ONLY-NEXT:    vmovdqa %xmm6, %xmm5
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm9[0,1,2],xmm1[3,4],xmm9[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovdqa (%rsp), %xmm4 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm6[0,1,2,3,4,5],xmm4[6],xmm6[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vmovdqa (%rsp), %xmm6 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm4[0,1,2,3,4,5],xmm6[6],xmm4[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,4,7,6]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm9[5,6,7]
 ; AVX1-ONLY-NEXT:    vpsrld $16, %xmm0, %xmm9
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, %xmm7
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm9[4],xmm2[4],xmm9[5],xmm2[5],xmm9[6],xmm2[6],xmm9[7],xmm2[7]
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm2, %xmm0
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm13[0],xmm14[0],xmm13[1],xmm14[1],xmm13[2],xmm14[2],xmm13[3],xmm14[3]
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm13[0],xmm15[0],xmm13[1],xmm15[1],xmm13[2],xmm15[2],xmm13[3],xmm15[3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,2,2,2]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm10[2,3],xmm9[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm12[0],xmm2[1],xmm12[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm8, %xmm2
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm12[0],xmm8[1],xmm12[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,5,4,7,7]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm10 = xmm10[4],xmm3[4],xmm10[5],xmm3[5],xmm10[6],xmm3[6],xmm10[7],xmm3[7]
-; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[u,u,u,u,u,u,u,u,0,1,4,5,8,9,10,11]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,2,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,0,2]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm11 = xmm8[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm10, %ymm10
@@ -2127,18 +2122,18 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vandps %ymm10, %ymm9, %ymm9
 ; AVX1-ONLY-NEXT:    vorps %ymm1, %ymm9, %ymm9
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vinsertps $41, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = zero,xmm1[1],mem[0],zero
-; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm5[2],xmm15[2],xmm5[3],xmm15[3]
+; AVX1-ONLY-NEXT:    vinsertps {{.*#+}} xmm1 = zero,xmm1[1],xmm14[1],zero
+; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm5[2],xmm7[2],xmm5[3],xmm7[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0,1,2],xmm1[3,4],xmm5[5,6,7]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm4[0],xmm6[1],xmm4[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm4[1],xmm6[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,4,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm14[1,1,1,1]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm15[1,1,1,1]
 ; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3],xmm6[4,5,6,7]
@@ -2168,7 +2163,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovaps %ymm9, (%rax)
 ; AVX1-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, (%rax)
-; AVX1-ONLY-NEXT:    addq $264, %rsp # imm = 0x108
+; AVX1-ONLY-NEXT:    addq $248, %rsp
 ; AVX1-ONLY-NEXT:    vzeroupper
 ; AVX1-ONLY-NEXT:    retq
 ;
@@ -2444,7 +2439,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <0,3,7,2,6,u,u,u>
 ; AVX2-FAST-NEXT:    vpermd %ymm12, %ymm13, %ymm13
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpshufb %xmm14, %xmm12, %xmm15
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm12
 ; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[3,1,2,3,4,5,6,7]
@@ -2466,7 +2461,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm6
 ; AVX2-FAST-NEXT:    vpshufb %xmm14, %xmm6, %xmm6
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm15[10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm15[10,11,6,7,4,5,6,7,u,u,u,u,u,u,u,u]
 ; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm14[0],xmm6[0],xmm14[1],xmm6[1],xmm14[2],xmm6[2],xmm14[3],xmm6[3]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm2[0,1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7]
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <0,4,7,3,6,u,u,u>
@@ -2491,7 +2486,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpshufb %ymm12, %ymm2, %ymm2
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,10,11,6,7,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,10,11,6,7,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,6,7]
 ; AVX2-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7]
@@ -2521,7 +2516,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa 160(%rdi), %ymm6
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2],ymm4[3],ymm6[4,5],ymm4[6],ymm6[7]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm5, %xmm7
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,8,9,10,11,6,7,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,8,9,10,11,6,7,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,4,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7]
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
@@ -2608,7 +2603,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,0,1,14,15,u,u,10,11,24,25,24,25,24,25,24,25,16,17,30,31,u,u,26,27]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm12[6],ymm11[7,8,9,10,11,12,13],ymm12[14],ymm11[15]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm12 = ymm3[0,1],ymm2[2],ymm3[3,4],ymm2[5],ymm3[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm13 = <8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm13 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm13, %xmm12, %xmm14
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm12, %xmm12
 ; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[3,1,2,3,4,5,6,7]
@@ -2630,7 +2625,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm15 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm15, %xmm5
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm13, %xmm5, %xmm5
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm13 = xmm15[10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm13 = xmm15[10,11,6,7,4,5,6,7,u,u,u,u,u,u,u,u]
 ; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm13 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm15 = ymm13[2,3,0,1]
@@ -2650,7 +2645,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm6[5,6,7],ymm4[8,9,10,11,12],ymm6[13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,10,11,6,7,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,10,11,6,7,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7]
@@ -2849,7 +2844,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [2,5,9,12,2,5,9,12]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [10,3,6,15,12,13,6,15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [10,3,6,15,12,13,6,15]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <2,6,9,u,13,u,u,u>
 ; AVX512F-FAST-NEXT:    vpermd %zmm0, %zmm2, %zmm10
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <2,5,9,u,12,u,u,u>
@@ -2879,69 +2874,71 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm15
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0,1,2,3],xmm15[4],xmm14[5],xmm15[6],xmm14[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[0,1,0,1,14,15,12,13,10,11,8,9,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29]
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [0,1,2,15,0,1,2,15]
-; AVX512F-FAST-NEXT:    # ymm13 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermt2d %ymm15, %ymm13, %ymm14
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3,4,5,6],ymm13[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[0,1,6,7,8,9,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm4[0,1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm7
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0],xmm15[1],xmm7[2,3,4,5],xmm15[6],xmm7[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[2,3,0,1,14,15,12,13,10,11],zero,zero,zero,zero,zero,zero,zero,zero,ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %ymm6, %ymm7, %ymm6
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm6[0],ymm14[1,2,3,4,5,6,7],ymm6[8],ymm14[9,10,11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm12[0,1,2],ymm11[3],ymm12[4,5],ymm11[6],ymm12[7]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm14
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm14[0],xmm7[1],xmm14[2,3,4,5],xmm7[6],xmm14[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[0,1,2,3,0,1,14,15,12,13,10,11,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm4[0,1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm15
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0],xmm14[1],xmm15[2,3,4,5],xmm14[6],xmm15[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[2,3,0,1,14,15,12,13,10,11],zero,zero,zero,zero,zero,zero,zero,zero,ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpor %ymm6, %ymm14, %ymm6
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm6[0],ymm13[1,2,3,4,5,6,7],ymm6[8],ymm13[9,10,11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm12[0,1,2],ymm11[3],ymm12[4,5],ymm11[6],ymm12[7]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm14
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm14[0],xmm13[1],xmm14[2,3,4,5],xmm13[6],xmm14[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[0,1,2,3,0,1,14,15,12,13,10,11,u,u,u,u]
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
 ; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm14 = [21474836482,21474836482,21474836482,21474836482]
 ; AVX512F-FAST-NEXT:    vpermd %ymm2, %ymm14, %ymm14
 ; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512F-FAST-NEXT:    vpermt2d %ymm14, %ymm13, %ymm7
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,6],ymm14[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[2,3,4,5,10,11,16,17],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7]
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm15
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1],xmm14[2,3,4,5],xmm15[6],xmm14[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[4,5,2,3,0,1,14,15,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpor %ymm10, %ymm14, %ymm10
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm10[0],ymm7[1,2,3,4,5,6,7],ymm10[8],ymm7[9,10,11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0],ymm12[1],ymm11[2,3],ymm12[4],ymm11[5,6,7]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm14
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0],xmm14[1],xmm7[2,3,4,5],xmm14[6],xmm7[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[0,1,4,5,2,3,0,1,14,15,12,13,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm10[0],ymm13[1,2,3,4,5,6,7],ymm10[8],ymm13[9,10,11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm11[0],ymm12[1],ymm11[2,3],ymm12[4],ymm11[5,6,7]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm14
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0],xmm14[1],xmm13[2,3,4,5],xmm14[6],xmm13[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[0,1,4,5,2,3,0,1,14,15,12,13,u,u,u,u]
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm2[0,1,1,3]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25]
-; AVX512F-FAST-NEXT:    vpermt2d %ymm15, %ymm13, %ymm7
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,6],ymm15[7]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm5[0],ymm4[1],ymm5[2,3,4],ymm4[5],ymm5[6,7]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm8
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm15[1],xmm8[2],xmm15[3],xmm8[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpermd %zmm0, %zmm17, %zmm15
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[4,5,10,11,0,1,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[6,7,4,5,2,3,0,1,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %ymm15, %ymm8, %ymm8
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm7
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0],xmm15[1],xmm7[2],xmm15[3],xmm7[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermd %zmm0, %zmm8, %zmm8
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[4,5,10,11,0,1,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[6,7,4,5,2,3,0,1,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpor %ymm7, %ymm8, %ymm7
 ; AVX512F-FAST-NEXT:    vpermd %zmm1, %zmm16, %zmm15
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <0,3,7,10,14,u,u,u>
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm8[0],ymm7[1,2,3,4,5,6,7],ymm8[8],ymm7[9,10,11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpermd %zmm0, %zmm16, %zmm8
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <0,3,7,10,14,u,u,u>
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm7[0],ymm13[1,2,3,4,5,6,7],ymm7[8],ymm13[9,10,11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm13[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermd %zmm0, %zmm8, %zmm8
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0],ymm12[1],ymm11[2,3,4],ymm12[5],ymm11[6,7]
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm12
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm12[0],xmm11[1],xmm12[2],xmm11[3],xmm12[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
 ; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm14, %ymm9
-; AVX512F-FAST-NEXT:    vpermt2d %ymm9, %ymm13, %ymm11
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3,4,5,6],ymm9[7]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm9, %xmm13
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm9
-; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[3,1,2,3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm13[0],xmm9[0],xmm13[1],xmm9[1],xmm13[2],xmm9[2],xmm13[3],xmm9[3]
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm11, %xmm13
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm11
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[3,1,2,3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
 ; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm8, %ymm8
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm8[0],ymm11[1,2,3,4,5,6,7],ymm8[8],ymm11[9,10,11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm11[0,1],ymm8[2,3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm8[0],ymm9[1,2,3,4,5,6,7],ymm8[8],ymm9[9,10,11,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <0,3,3,u,0,3,7,u>
 ; AVX512F-FAST-NEXT:    vpermd %ymm2, %ymm9, %ymm9
@@ -2954,7 +2951,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <1,4,8,11,15,u,u,u>
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [2,6,9,13,2,6,9,13]
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <0,4,7,11,14,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[10,11,6,7,4,5,6,7,u,u,u,u,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3]
 ; AVX512F-FAST-NEXT:    vpermd %zmm0, %zmm16, %zmm12
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
@@ -2972,7 +2969,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u,u,10,11,6,7,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u,u,10,11,6,7,4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,4,6,7]
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5,6,7]
@@ -3867,32 +3864,33 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-LABEL: load_i16_stride7_vf32:
 ; AVX1-ONLY:       # %bb.0:
 ; AVX1-ONLY-NEXT:    subq $680, %rsp # imm = 0x2A8
-; AVX1-ONLY-NEXT:    vmovdqa 400(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vpsrld $16, %xmm9, %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa 384(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm12[2,2,3,3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX1-ONLY-NEXT:    vmovdqa 352(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa 400(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vpsrld $16, %xmm1, %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, %xmm10
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpsrlq $16, %xmm1, %xmm1
-; AVX1-ONLY-NEXT:    vmovdqa 368(%rdi), %xmm13
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 384(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm11[2,2,3,3]
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX1-ONLY-NEXT:    vmovdqa 352(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vpsrlq $16, %xmm6, %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 368(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa 432(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovdqa 416(%rdi), %xmm15
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm15[0],xmm2[0],xmm15[1],xmm2[1],xmm15[2],xmm2[2],xmm15[3],xmm2[3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, %xmm14
+; AVX1-ONLY-NEXT:    vmovdqa 416(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm12[0],xmm2[0],xmm12[1],xmm2[1],xmm12[2],xmm2[2],xmm12[3],xmm2[3]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa %xmm2, %xmm13
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5],xmm1[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 256(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 256(%rdi), %xmm15
 ; AVX1-ONLY-NEXT:    vmovaps 272(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm2[2],xmm0[2],zero
+; AVX1-ONLY-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm15[2],xmm0[2],zero
 ; AVX1-ONLY-NEXT:    vmovdqa 240(%rdi), %xmm2
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
@@ -3902,11 +3900,11 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
 ; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm0[3,4],xmm2[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa 304(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vmovdqa 304(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 288(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm8, (%rsp) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
 ; AVX1-ONLY-NEXT:    vmovdqa 320(%rdi), %xmm3
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
@@ -3916,9 +3914,9 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm6 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX1-ONLY-NEXT:    vandps %ymm6, %ymm2, %ymm2
-; AVX1-ONLY-NEXT:    vandnps %ymm3, %ymm6, %ymm3
+; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm5 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX1-ONLY-NEXT:    vandps %ymm5, %ymm2, %ymm2
+; AVX1-ONLY-NEXT:    vandnps %ymm3, %ymm5, %ymm3
 ; AVX1-ONLY-NEXT:    vorps %ymm3, %ymm2, %ymm2
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
@@ -3926,16 +3924,16 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpsrld $16, %xmm0, %xmm1
-; AVX1-ONLY-NEXT:    vmovdqa 160(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; AVX1-ONLY-NEXT:    vmovdqa 160(%rdi), %xmm14
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm14[2,2,3,3]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
 ; AVX1-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpsrlq $16, %xmm0, %xmm2
-; AVX1-ONLY-NEXT:    vmovdqa 144(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; AVX1-ONLY-NEXT:    vmovdqa 144(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm9[4],xmm2[5],xmm9[5],xmm2[6],xmm9[6],xmm2[7],xmm9[7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm9, (%rsp) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa 208(%rdi), %xmm2
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -3944,12 +3942,13 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm1[0,1,2,3,4,5],xmm2[6,7]
-; AVX1-ONLY-NEXT:    vmovdqa 80(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5],xmm2[6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 80(%rdi), %xmm3
 ; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm3, %xmm7
+; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
@@ -3967,45 +3966,45 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,2,3,3]
 ; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX1-ONLY-NEXT:    vmovaps 32(%rdi), %xmm11
-; AVX1-ONLY-NEXT:    vmovaps 48(%rdi), %xmm10
-; AVX1-ONLY-NEXT:    vinsertps {{.*#+}} xmm4 = zero,xmm11[2],xmm10[2],zero
-; AVX1-ONLY-NEXT:    vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 32(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 48(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertps {{.*#+}} xmm4 = zero,xmm4[2],xmm0[2],zero
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3,4],xmm3[5,6,7]
-; AVX1-ONLY-NEXT:    vandnps %ymm2, %ymm6, %ymm2
-; AVX1-ONLY-NEXT:    vandps %ymm6, %ymm3, %ymm0
+; AVX1-ONLY-NEXT:    vandnps %ymm2, %ymm5, %ymm2
+; AVX1-ONLY-NEXT:    vandps %ymm5, %ymm3, %ymm0
 ; AVX1-ONLY-NEXT:    vorps %ymm2, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm12[4],xmm9[4],xmm12[5],xmm9[5],xmm12[6],xmm9[6],xmm12[7],xmm9[7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; AVX1-ONLY-NEXT:    vpblendw $191, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1,2,3,4,5],xmm13[6],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0,1,2,3,4,5],xmm10[6],xmm6[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,3,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpslld $16, %xmm14, %xmm1
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm15[0],xmm1[0],xmm15[1],xmm1[1],xmm15[2],xmm1[2],xmm15[3],xmm1[3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm15, %xmm12
-; AVX1-ONLY-NEXT:    vmovdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpslld $16, %xmm13, %xmm1
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm12[0],xmm1[0],xmm12[1],xmm1[1],xmm12[2],xmm1[2],xmm12[3],xmm1[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = xmm15[4],mem[4],xmm15[5],mem[5],xmm15[6],mem[6],xmm15[7],mem[7]
+; AVX1-ONLY-NEXT:    vmovaps %xmm15, %xmm12
+; AVX1-ONLY-NEXT:    vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm2 = [8,9,8,9,8,9,8,9,6,7,6,7,6,7,6,7]
 ; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm2, %xmm5
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm13[0],xmm14[1],xmm13[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm15[0],xmm6[1],xmm15[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[1,0,3,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3,4],xmm2[5,6,7]
-; AVX1-ONLY-NEXT:    vpsrld $16, %xmm8, %xmm2
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrld $16, %xmm2, %xmm2
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm3[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
@@ -4020,24 +4019,22 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vandnps %ymm3, %ymm2, %ymm3
 ; AVX1-ONLY-NEXT:    vorps %ymm3, %ymm1, %ymm1
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
-; AVX1-ONLY-NEXT:    vandnps %ymm0, %ymm2, %ymm0
-; AVX1-ONLY-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm8 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
+; AVX1-ONLY-NEXT:    vandnps %ymm0, %ymm8, %ymm0
+; AVX1-ONLY-NEXT:    vandps %ymm1, %ymm8, %ymm1
 ; AVX1-ONLY-NEXT:    vorps %ymm0, %ymm1, %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm0 = xmm8[4],mem[4],xmm8[5],mem[5],xmm8[6],mem[6],xmm8[7],mem[7]
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm0 = xmm14[4],mem[4],xmm14[5],mem[5],xmm14[6],mem[6],xmm14[7],mem[7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $64, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = xmm1[0,1,2,3,4,5],mem[6],xmm1[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm14[0,1,2,3,4,5],xmm9[6],xmm14[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,3,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpslld $16, %xmm6, %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpslld $16, %xmm9, %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
@@ -4051,7 +4048,9 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpsrld $16, %xmm2, %xmm4
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = xmm3[4],mem[4],xmm3[5],mem[5],xmm3[6],mem[6],xmm3[7],mem[7]
 ; AVX1-ONLY-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpblendw $253, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm5 # 16-byte Folded Reload
@@ -4064,46 +4063,44 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vandps %ymm5, %ymm4, %ymm4
 ; AVX1-ONLY-NEXT:    vorps %ymm1, %ymm4, %ymm1
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
-; AVX1-ONLY-NEXT:    vandnps %ymm0, %ymm4, %ymm0
-; AVX1-ONLY-NEXT:    vandps %ymm4, %ymm1, %ymm1
+; AVX1-ONLY-NEXT:    vandnps %ymm0, %ymm8, %ymm0
+; AVX1-ONLY-NEXT:    vandps %ymm1, %ymm8, %ymm1
 ; AVX1-ONLY-NEXT:    vorps %ymm0, %ymm1, %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vpsllq $16, %xmm9, %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-ONLY-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,3,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsllq $16, %xmm0, %xmm0
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm10[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,0,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw $243, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1],xmm1[2,3],mem[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm12[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; AVX1-ONLY-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm8[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm15[0,1,0,3]
+; AVX1-ONLY-NEXT:    vpermilps {{.*#+}} xmm1 = xmm12[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm9[2,2,3,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm13[2,2,3,3]
 ; AVX1-ONLY-NEXT:    vpunpckhqdq {{.*#+}} xmm4 = xmm1[1],xmm4[1]
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3]
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa (%rsp), %xmm11 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm11[2,2,2,2]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm12[0,1,2,3,4,5],xmm5[6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm12[2,2,2,2]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm15[0,1,2,3,4,5],xmm5[6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm10[0,1,0,1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,6],xmm13[7]
 ; AVX1-ONLY-NEXT:    vpermilps $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm13 = mem[1,1,1,1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm13, %ymm5, %ymm5
-; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm13 = [65535,65535,65535,65535,65535,0,0,0,0,65535,65535,65535,65535,65535,65535,65535]
-; AVX1-ONLY-NEXT:    vandps %ymm4, %ymm13, %ymm4
-; AVX1-ONLY-NEXT:    vandnps %ymm5, %ymm13, %ymm5
+; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm6 = [65535,65535,65535,65535,65535,0,0,0,0,65535,65535,65535,65535,65535,65535,65535]
+; AVX1-ONLY-NEXT:    vandps %ymm6, %ymm4, %ymm4
+; AVX1-ONLY-NEXT:    vandnps %ymm5, %ymm6, %ymm5
 ; AVX1-ONLY-NEXT:    vorps %ymm5, %ymm4, %ymm4
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm5 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
@@ -4113,16 +4110,16 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpsllq $16, %xmm0, %xmm0
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
-; AVX1-ONLY-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; AVX1-ONLY-NEXT:    vpshufd $236, (%rsp), %xmm4 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm4 = mem[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,0,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm8[0,1],xmm4[2,3],xmm8[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm14[0,1],xmm4[2,3],xmm14[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm4[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1],xmm4[2],xmm9[2],xmm4[3],xmm9[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm0[0,1,2,3,4,5],xmm4[6,7]
 ; AVX1-ONLY-NEXT:    vpshufd $170, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm0 = mem[2,2,2,2]
@@ -4132,25 +4129,25 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm5[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[1,1,1,1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm7[0,1,0,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm9[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm2[2,2,3,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm6[2,2,3,3]
 ; AVX1-ONLY-NEXT:    vpunpckhqdq {{.*#+}} xmm13 = xmm0[1],xmm13[1]
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm0 = xmm3[0],mem[0],xmm3[1],mem[1],xmm3[2],mem[2],xmm3[3],mem[3]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm14 = xmm0[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[0,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm13 = xmm14[0,1],xmm13[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm6 = [65535,65535,65535,65535,65535,0,0,0,0,65535,65535,65535,65535,65535,65535,65535]
-; AVX1-ONLY-NEXT:    vandnps %ymm5, %ymm6, %ymm5
-; AVX1-ONLY-NEXT:    vandps %ymm6, %ymm13, %ymm13
+; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,0,0,0,0,65535,65535,65535,65535,65535,65535,65535]
+; AVX1-ONLY-NEXT:    vandnps %ymm5, %ymm2, %ymm5
+; AVX1-ONLY-NEXT:    vandps %ymm2, %ymm13, %ymm13
 ; AVX1-ONLY-NEXT:    vorps %ymm5, %ymm13, %ymm5
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm3 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
-; AVX1-ONLY-NEXT:    vandnps %ymm4, %ymm3, %ymm4
-; AVX1-ONLY-NEXT:    vandps %ymm3, %ymm5, %ymm5
+; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm7 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
+; AVX1-ONLY-NEXT:    vandnps %ymm4, %ymm7, %ymm4
+; AVX1-ONLY-NEXT:    vandps %ymm7, %ymm5, %ymm5
 ; AVX1-ONLY-NEXT:    vorps %ymm4, %ymm5, %ymm3
 ; AVX1-ONLY-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
@@ -4158,51 +4155,50 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    # xmm4 = xmm3[0],mem[0],xmm3[1],mem[1],xmm3[2],mem[2],xmm3[3],mem[3]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,0,3,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $191, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = mem[0,1,2,3,4,5],xmm5[6],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw $64, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = xmm2[0,1,2,3,4,5],mem[6],xmm2[7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,0,0,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,6,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm4[1,2],xmm5[3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7]
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm4[2,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,0,0,0]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm13 = xmm5[0,1,2,3,4,5],xmm13[6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,2,2]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm9[0],xmm15[1],xmm9[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = xmm2[0],mem[1],xmm2[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,1,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,7,7,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm5[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm15[4],xmm12[4],xmm15[5],xmm12[5],xmm15[6],xmm12[6],xmm15[7],xmm12[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,7,7]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm10, %xmm11
 ; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm5 = [4,5,2,3,4,5,6,7,8,9,4,5,8,9,2,3]
 ; AVX1-ONLY-NEXT:    vpshufb %xmm5, %xmm14, %xmm14
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm9, %xmm15
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm10, %xmm15
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm15, %ymm14, %ymm14
-; AVX1-ONLY-NEXT:    vandps %ymm6, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vandnps %ymm14, %ymm6, %ymm14
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, %ymm9
+; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm15 = [65535,65535,65535,65535,65535,0,0,0,0,65535,65535,65535,65535,65535,65535,65535]
+; AVX1-ONLY-NEXT:    vandps %ymm1, %ymm15, %ymm1
+; AVX1-ONLY-NEXT:    vandnps %ymm14, %ymm15, %ymm14
 ; AVX1-ONLY-NEXT:    vorps %ymm1, %ymm14, %ymm1
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm13, %ymm0, %ymm13
-; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm12 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
-; AVX1-ONLY-NEXT:    vandnps %ymm13, %ymm12, %ymm13
-; AVX1-ONLY-NEXT:    vandps %ymm1, %ymm12, %ymm1
+; AVX1-ONLY-NEXT:    vandnps %ymm13, %ymm7, %ymm13
+; AVX1-ONLY-NEXT:    vandps %ymm7, %ymm1, %ymm1
 ; AVX1-ONLY-NEXT:    vorps %ymm1, %ymm13, %ymm1
 ; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = xmm8[0],mem[0],xmm8[1],mem[1],xmm8[2],mem[2],xmm8[3],mem[3]
+; AVX1-ONLY-NEXT:    vmovdqa (%rsp), %xmm12 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,3,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $191, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm13 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm13 = mem[0,1,2,3,4,5],xmm6[6],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw $191, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm13 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm13 = mem[0,1,2,3,4,5],xmm7[6],mem[7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,0,0,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,7,6,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0],xmm1[1,2],xmm13[3,4,5,6,7]
@@ -4214,27 +4210,29 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,5],xmm14[6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm14 = xmm2[0],xmm7[1],xmm2[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm14 = xmm6[0],xmm9[1],xmm6[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[0,1,1,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,7,7,7,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm14[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm14 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm14 = xmm10[4],mem[4],xmm10[5],mem[5],xmm10[6],mem[6],xmm10[7],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm14 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm14 = xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,4,7,7]
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm14 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm14 = xmm14[4],mem[4],xmm14[5],mem[5],xmm14[6],mem[6],xmm14[7],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm14[4],xmm11[4],xmm14[5],xmm11[5],xmm14[6],xmm11[6],xmm14[7],xmm11[7]
 ; AVX1-ONLY-NEXT:    vpshufb %xmm5, %xmm14, %xmm5
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm2, %xmm14
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm8, %xmm14
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm14, %ymm5, %ymm5
-; AVX1-ONLY-NEXT:    vandps %ymm0, %ymm9, %ymm0
-; AVX1-ONLY-NEXT:    vandnps %ymm5, %ymm9, %ymm2
+; AVX1-ONLY-NEXT:    vandps %ymm0, %ymm15, %ymm0
+; AVX1-ONLY-NEXT:    vandnps %ymm5, %ymm15, %ymm2
 ; AVX1-ONLY-NEXT:    vorps %ymm2, %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm13, %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vandnps %ymm2, %ymm12, %ymm2
-; AVX1-ONLY-NEXT:    vandps %ymm0, %ymm12, %ymm0
+; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm6 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
+; AVX1-ONLY-NEXT:    vandnps %ymm2, %ymm6, %ymm2
+; AVX1-ONLY-NEXT:    vandps %ymm6, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovaps %ymm6, %ymm5
 ; AVX1-ONLY-NEXT:    vorps %ymm2, %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
@@ -4249,68 +4247,67 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm4[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm2[6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm9, %xmm2
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm7[2,3,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm2, %xmm2
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm6[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrlq $16, %xmm5, %xmm4
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX1-ONLY-NEXT:    vpsrlq $16, %xmm3, %xmm4
+; AVX1-ONLY-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = xmm4[0],mem[0],xmm4[1],mem[1],xmm4[2],mem[2],xmm4[3],mem[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm4 = mem[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,4,7]
-; AVX1-ONLY-NEXT:    vpunpckhdq (%rsp), %xmm4, %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm4 = xmm4[2],mem[2],xmm4[3],mem[3]
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm11[4],xmm4[5],xmm11[5],xmm4[6],xmm11[6],xmm4[7],xmm11[7]
-; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u,u,0,1,4,5,8,9,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm11[2,3,2,3]
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7]
+; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[12,13,14,15,4,5,6,7,0,1,4,5,8,9,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm10[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm13, %ymm4, %ymm4
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vandnps %ymm0, %ymm12, %ymm0
-; AVX1-ONLY-NEXT:    vandps %ymm2, %ymm12, %ymm2
+; AVX1-ONLY-NEXT:    vandnps %ymm0, %ymm5, %ymm0
+; AVX1-ONLY-NEXT:    vandps %ymm5, %ymm2, %ymm2
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, %ymm13
 ; AVX1-ONLY-NEXT:    vorps %ymm0, %ymm2, %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
-; AVX1-ONLY-NEXT:    vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm0 = xmm0[0,1],mem[2,3],xmm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm6[0,3,2,3]
+; AVX1-ONLY-NEXT:    vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm0 = mem[1,1,1,1]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm12[2,3],xmm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm7[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,0,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2],xmm2[3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm12, %xmm1
-; AVX1-ONLY-NEXT:    vpshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = mem[2,3,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm7, %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm5[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrlq $16, %xmm2, %xmm2
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; AVX1-ONLY-NEXT:    vpsrlq $16, %xmm9, %xmm2
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm10[0,1,0,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm9[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm10[2],xmm2[3],xmm10[3]
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
-; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,0,1,4,5,8,9,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm13[2,3,2,3]
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[12,13,14,15,4,5,6,7,0,1,4,5,8,9,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm8[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
-; AVX1-ONLY-NEXT:    vandnps %ymm0, %ymm2, %ymm0
-; AVX1-ONLY-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; AVX1-ONLY-NEXT:    vandnps %ymm0, %ymm13, %ymm0
+; AVX1-ONLY-NEXT:    vandps %ymm1, %ymm13, %ymm1
+; AVX1-ONLY-NEXT:    vmovaps %ymm13, %ymm8
 ; AVX1-ONLY-NEXT:    vorps %ymm0, %ymm1, %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
@@ -4319,78 +4316,78 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
-; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm4 = [8,9,8,9,8,9,8,9,6,7,6,7,6,7,6,7]
-; AVX1-ONLY-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
+; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[8,9,8,9,8,9,8,9,6,7,6,7,6,7,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3,4],xmm0[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $191, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1,2,3,4,5],xmm14[6],mem[7]
+; AVX1-ONLY-NEXT:    vpblendw $64, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = xmm14[0,1,2,3,4,5],mem[6],xmm14[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,7,6]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
-; AVX1-ONLY-NEXT:    vpsrld $16, %xmm7, %xmm1
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7]
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; AVX1-ONLY-NEXT:    vpsrld $16, %xmm6, %xmm1
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,2,2]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa (%rsp), %xmm2 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm2 = xmm2[0],mem[1],xmm2[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,7,7]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm2 = xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
-; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,0,1,4,5,8,9,10,11]
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm11[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
-; AVX1-ONLY-NEXT:    vandnps %ymm0, %ymm2, %ymm0
-; AVX1-ONLY-NEXT:    vandps %ymm2, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, %ymm5
+; AVX1-ONLY-NEXT:    vandnps %ymm0, %ymm8, %ymm0
+; AVX1-ONLY-NEXT:    vandps %ymm1, %ymm8, %ymm1
 ; AVX1-ONLY-NEXT:    vorps %ymm0, %ymm1, %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm0 = xmm6[0],mem[0],xmm6[1],mem[1],xmm6[2],mem[2],xmm6[3],mem[3]
-; AVX1-ONLY-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm15, %xmm1
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX1-ONLY-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm0 = xmm12[0],mem[0],xmm12[1],mem[1],xmm12[2],mem[2],xmm12[3],mem[3]
+; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,8,9,8,9,6,7,6,7,6,7,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm1, %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm12[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0,1,2,3,4,5],xmm9[6],xmm6[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm8[0,1,2,3,4,5],xmm11[6],xmm8[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,7,6]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrld $16, %xmm1, %xmm1
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm12[4],xmm1[5],xmm12[5],xmm1[6],xmm12[6],xmm1[7],xmm12[7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm8, %xmm12
+; AVX1-ONLY-NEXT:    vpsrld $16, %xmm5, %xmm1
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,2,2]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm10[0],xmm8[1],xmm10[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm10[0],xmm9[1],xmm10[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,7,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm7[4],xmm2[5],xmm7[5],xmm2[6],xmm7[6],xmm2[7],xmm7[7]
-; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,u,0,1,4,5,8,9,10,11]
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm13[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-ONLY-NEXT:    vmovdqa %xmm13, %xmm10
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm3[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vandnps %ymm0, %ymm5, %ymm0
-; AVX1-ONLY-NEXT:    vandps %ymm5, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, %ymm13
+; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
+; AVX1-ONLY-NEXT:    vandnps %ymm0, %ymm2, %ymm0
+; AVX1-ONLY-NEXT:    vandps %ymm2, %ymm1, %ymm1
 ; AVX1-ONLY-NEXT:    vorps %ymm0, %ymm1, %ymm0
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vinsertps $41, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
@@ -4399,15 +4396,13 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm2 = xmm2[2],mem[2],xmm2[3],mem[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3,4],xmm2[5,6,7]
-; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = xmm14[0],mem[1],xmm14[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw $253, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = mem[0],xmm14[1],mem[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = mem[1,1,1,1]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm13[1,1,1,1]
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm15[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
@@ -4415,7 +4410,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3],xmm4[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpmovzxwd (%rsp), %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
@@ -4428,34 +4423,37 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vandnps %ymm1, %ymm13, %ymm1
-; AVX1-ONLY-NEXT:    vandps %ymm2, %ymm13, %ymm2
+; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
+; AVX1-ONLY-NEXT:    vandnps %ymm1, %ymm4, %ymm1
+; AVX1-ONLY-NEXT:    vandps %ymm4, %ymm2, %ymm2
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, %ymm13
 ; AVX1-ONLY-NEXT:    vorps %ymm1, %ymm2, %ymm1
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vinsertps $41, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm2 = zero,xmm2[1],mem[0],zero
-; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm4 = xmm11[2],xmm15[2],xmm11[3],xmm15[3]
+; AVX1-ONLY-NEXT:    vpunpckhdq (%rsp), %xmm12, %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = xmm12[2],mem[2],xmm12[3],mem[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2],xmm2[3,4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm9[0],xmm6[1],xmm9[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm11[0],xmm8[1],xmm11[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,4,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm12[1,1,1,1]
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX1-ONLY-NEXT:    vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = mem[1,1,1,1]
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = xmm3[4],mem[4],xmm3[5],mem[5],xmm3[6],mem[6],xmm3[7],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3],xmm5[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm8[0],xmm5[0],xmm8[1],xmm5[1],xmm8[2],xmm5[2],xmm8[3],xmm5[3]
+; AVX1-ONLY-NEXT:    vpmovzxwd {{.*#+}} xmm5 = xmm10[0],zero,xmm10[1],zero,xmm10[2],zero,xmm10[3],zero
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm7[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,4,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5],xmm6[6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm10[3,3,3,3]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[3,3,3,3]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
@@ -5142,7 +5140,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm5[0,1],ymm3[2],ymm5[3,4],ymm3[5],ymm5[6,7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm8[8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm8[8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm8
 ; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[3,1,2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
@@ -5170,7 +5168,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm1[0,1],ymm12[2],ymm1[3,4],ymm12[5],ymm1[6,7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm6[8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm6[8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm6
 ; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[3,1,2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
@@ -5192,9 +5190,9 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm8[0,1,2,3,4],ymm4[5,6,7],ymm8[8,9,10,11,12],ymm4[13,14,15]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm5[0,1],ymm3[2,3],ymm5[4,5],ymm3[6,7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm6
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm6, %xmm6
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm0[0,1,2],ymm10[3],ymm0[4,5],ymm10[6],ymm0[7]
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <0,4,7,3,6,u,u,u>
@@ -5216,7 +5214,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm1[0,1],ymm12[2,3],ymm1[4,5],ymm12[6,7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm7
 ; AVX2-FAST-NEXT:    vpshufb %xmm3, %xmm7, %xmm0
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
 ; AVX2-FAST-NEXT:    vmovdqa %ymm14, %ymm11
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm9[0,1,2],ymm14[3],ymm9[4,5],ymm14[6],ymm9[7]
@@ -5245,7 +5243,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpblendd $72, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm3 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm3 = ymm2[0,1,2],mem[3],ymm2[4,5],mem[6],ymm2[7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm8
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,u,u,10,11,6,7,u,u,u,u>
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,2,3,4,5,6,7,10,11,6,7,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpshufb %xmm9, %xmm8, %xmm8
 ; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,4,6,7]
 ; AVX2-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
@@ -5533,7 +5531,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1],ymm11[2],ymm8[3,4],ymm11[5],ymm8[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm7 = xmm6[8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm7 = xmm6[8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm6, %xmm6
 ; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[3,1,2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
@@ -5568,7 +5566,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm6 = <8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm3, %xmm5
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm3, %xmm3
 ; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
@@ -5583,7 +5581,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0,1],ymm11[2,3],ymm8[4,5],ymm11[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm1, %xmm3
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm6 = <10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm6 = [10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm1, %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,2,3,0,1,14,15,12,13,26,27,26,27,26,27,26,27,18,19,16,17,30,31,28,29]
@@ -5618,7 +5616,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm8[5,6,7],ymm1[8,9,10,11,12],ymm8[13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1],ymm4[2,3],ymm2[4,5],ymm4[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm8, %xmm10
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm9 = xmm10[8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm9 = xmm10[8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm8, %xmm6
 ; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
@@ -5639,7 +5637,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd $183, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm7 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm7 = mem[0,1,2],ymm2[3],mem[4,5],ymm2[6],mem[7]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm7, %xmm8
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,u,u,10,11,6,7,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,2,3,4,5,6,7,10,11,6,7,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm8, %xmm8
 ; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
@@ -5716,19 +5714,19 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512F-ONLY-SLOW-LABEL: load_i16_stride7_vf32:
 ; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    subq $72, %rsp
+; AVX512F-ONLY-SLOW-NEXT:    pushq %rax
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm4
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0],ymm1[1],ymm4[2,3,4],ymm1[5],ymm4[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm10
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm1, %ymm4
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[6,7,12,13,2,3,16,17,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm3[2],ymm2[3,4,5],ymm3[6],ymm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm3, %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm2, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm3, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm2, %ymm11
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[u,u,u,u,u,u,u,u,u,u,u,u]
@@ -5750,29 +5748,29 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastw 252(%rdi), %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[0,1,0,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm5, %xmm12
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm5, %xmm13
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
 ; AVX512F-ONLY-SLOW-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm1, %zmm22 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1],ymm6[2,3],ymm3[4,5],ymm6[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm26
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm3[2,3],ymm6[4,5],ymm3[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm3, %ymm9
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm15 = xmm2[0,1,2],xmm1[3],xmm2[4],xmm1[5],xmm2[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm9[0,1],ymm4[2],ymm9[3,4],ymm4[5],ymm9[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm10[0,1],ymm4[2],ymm10[3,4],ymm4[5],ymm10[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6,7,8,9,10],ymm1[11],ymm2[12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[8,9,6,7,4,5,18,19,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm10[0,1,2],ymm11[3],ymm10[4,5],ymm11[6],ymm10[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[8,9,6,7,4,5,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0,1,2],ymm12[3],ymm11[4,5],ymm12[6],ymm11[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3,4,5],xmm2[6],xmm3[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[2,3,0,1,14,15,12,13,10,11],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, (%rsp) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1],ymm8[2],ymm7[3,4,5],ymm8[6],ymm7[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
@@ -5781,15 +5779,15 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,1,3,4,5,5,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,5,6,8,9,10,11,12,13,13,14]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5,6],ymm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0],xmm12[1],xmm5[2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0],xmm13[1],xmm5[2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[1,0,3,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm4[2,3],ymm9[4,5],ymm4[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1],ymm4[2,3],ymm10[4,5],ymm4[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7,8,9,10,11],ymm1[12],ymm0[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[10,11,8,9,6,7,20,21,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm11[0],ymm10[1],ymm11[2,3],ymm10[4],ymm11[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[10,11,8,9,6,7,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0],ymm11[1],ymm12[2,3],ymm11[4],ymm12[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3,4,5],xmm2[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[4,5,2,3,0,1,14,15,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -5804,58 +5802,57 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,1,0,3,4,5,4,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm12, %xmm19
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm31
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm26
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm24
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2],ymm4[3],ymm9[4,5],ymm4[6],ymm9[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm30
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2],ymm4[3],ymm10[4,5],ymm4[6],ymm10[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm25
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[4,5,10,11,0,1,22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm11[0],ymm10[1],ymm11[2,3,4],ymm10[5],ymm11[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm29
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm28
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[4,5,10,11,0,1,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0],ymm11[1],ymm12[2,3,4],ymm11[5],ymm12[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm29
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm28
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[6,7,4,5,2,3,0,1,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm18[0,1,1,3]
+; AVX512F-ONLY-SLOW-NEXT:    vporq %ymm0, %ymm1, %ymm21
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm18[0,1,1,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0],ymm7[1],ymm8[2,3,4],ymm7[5],ymm8[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm9[0,1,2,1,4,5,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm12[0,1,2,1,4,5,6,5]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastw 232(%rdi), %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm5, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpsrlq $48, %xmm5, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm21
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm18
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4],xmm1[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,8,9,6,7,4,5,2,3,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,6,7,4,5,2,3,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm14
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0,1,2,3,4,5],xmm3[6],xmm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm25
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0,1,2,3,4,5],xmm14[6],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,7,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7],ymm0[8,9,10,11,12],ymm2[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpsrld $16, %xmm12, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm13, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpsrld $16, %xmm13, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm19
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm20
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm14[0,1,2],ymm13[3],ymm14[4,5],ymm13[6],ymm14[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1,2],ymm13[3],ymm0[4,5],ymm13[6],ymm0[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm2[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6],ymm2[7,8,9,10,11,12,13],ymm3[14],ymm2[15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm15[0,2,2,1]
@@ -5872,13 +5869,12 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm3[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm31
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1,2],ymm9[3],ymm6[4,5],ymm9[6],ymm6[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4],xmm3[5],xmm2[6],xmm3[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm13[0],ymm14[1],ymm13[2,3],ymm14[4],ymm13[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm13[0],ymm0[1],ymm13[2,3],ymm0[4],ymm13[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1,2,3,4,5,6],ymm4[7,8],ymm3[9,10,11,12,13,14],ymm4[15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u]
@@ -5893,12 +5889,15 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0,1,2,3,4,5],ymm3[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0],ymm14[1],ymm13[2,3,4],ymm14[5],ymm13[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm17
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0],ymm0[1],ymm13[2,3,4],ymm0[5],ymm13[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm30
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm2[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,7,20,21,u,u,16,17,30,31,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3,4,5,6,7,8],ymm3[9],ymm2[10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1],ymm5[2],ymm6[3,4,5],ymm5[6],ymm6[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm9[0,1],ymm6[2],ymm9[3,4,5],ymm6[6],ymm9[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm9, %ymm4
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm10
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm10[4],xmm3[5],xmm10[6],xmm3[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,1,2,3,0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[u,u,u,u,u,u,u,u]
@@ -5911,28 +5910,29 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm10[4],xmm2[5],xmm10[5],xmm2[6],xmm10[6],xmm2[7],xmm10[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm2[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm17
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm16
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm10
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm10[1],xmm2[2,3,4,5],xmm10[6],xmm2[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,2,1,0,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm9[0,1,2,0,4,5,6,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm12[0,1,2,0,4,5,6,4]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5,7,4,8,9,10,11,12,13,15,12]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm10[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm0[0,1,2,3,6,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,2,2,2]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm10, %zmm2, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1,2],ymm5[3],ymm6[4,5],ymm5[6],ymm6[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm9[0,1,2],ymm6[3],ymm9[4,5],ymm6[6],ymm9[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm12
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm12[0],xmm2[1],xmm12[2,3,4,5],xmm2[6],xmm12[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm15[0,1],ymm11[2],ymm15[3,4,5],ymm11[6],ymm15[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm9
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm12[0,1,2,3],xmm9[4],xmm12[5],xmm9[6],xmm12[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1],ymm3[2],ymm13[3,4],ymm3[5],ymm13[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[1,1,2,0]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[0,1,22,23,28,29,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm2, %ymm12, %ymm2
@@ -5941,27 +5941,25 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm12[0,1,2],ymm9[3,4,5,6,7],ymm12[8,9,10],ymm9[11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm31
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm30
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1],ymm7[2,3],ymm8[4,5],ymm7[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm8
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0,1,2],xmm7[3],xmm8[4],xmm7[5],xmm8[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,10,11,8,9,6,7,4,5,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm14[0],xmm1[1],xmm14[2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[8,9,10,11,8,9,6,7,4,5,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm7[0,1,2,3,4],ymm1[5,6,7],ymm7[8,9,10,11,12],ymm1[13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,3,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm7, %zmm1, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm7, %zmm1, %zmm19
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1],ymm0[2],ymm3[3,4,5],ymm0[6],ymm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm0[2],ymm2[3,4,5],ymm0[6],ymm2[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm1[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,1,14,15,u,u,10,11,24,25,24,25,24,25,24,25,16,17,30,31,u,u,26,27]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm7[6],ymm1[7,8,9,10,11,12,13],ymm7[14],ymm1[15]
@@ -5973,15 +5971,14 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,2,2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1],ymm1[2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1],ymm1[2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm15[0,1,2],ymm11[3],ymm15[4,5],ymm11[6],ymm15[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm9
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2,3,4,5],xmm7[6],xmm9[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm14, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm13[0,1],ymm14[2,3],ymm13[4,5],ymm14[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm13[0,1],ymm3[2,3],ymm13[4,5],ymm3[6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm13[0,1,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0,1,2],ymm12[3],ymm9[4,5,6,7,8,9,10],ymm12[11],ymm9[12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm5[0],ymm6[1],ymm5[2,3],ymm6[4],ymm5[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm6[0],ymm4[1],ymm6[2,3],ymm4[4],ymm6[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm14
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm14[1],xmm12[2,3,4,5],xmm14[6],xmm12[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,2,3,0,1,14,15,12,13,10,11]
@@ -5994,13 +5991,12 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm11[0],ymm15[1],ymm11[2,3],ymm15[4],ymm11[5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm12
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0],xmm12[1],xmm7[2,3,4,5],xmm12[6],xmm7[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm1[3],ymm13[4,5],ymm1[6],ymm13[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm16
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm3[3],ymm13[4,5],ymm3[6],ymm13[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm27
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm12[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm14[4],ymm12[5,6,7,8,9,10,11],ymm14[12],ymm12[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm27
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm5, %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm6[0],ymm4[1],ymm6[2,3,4],ymm4[5],ymm6[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm28
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm14[1],xmm5[2],xmm14[3],xmm5[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,4,5,2,3,0,1,14,15,12,13]
@@ -6010,114 +6006,113 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm5, %ymm12, %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1],ymm10[2,3],ymm8[4,5],ymm10[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm12
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm2[0,1,2],ymm0[3],ymm2[4,5],ymm0[6],ymm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm7[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm12[0],ymm7[1,2,3,4,5,6],ymm12[7,8],ymm7[9,10,11,12,13,14],ymm12[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm8[0,1],ymm10[2,3],ymm8[4,5],ymm10[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm14
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[0,2,2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[0,2,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[1,3,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1],xmm7[2],xmm12[2],xmm7[3],xmm12[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm3[0,1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm12[0],ymm14[1,2,3,4,5,6],ymm12[7,8],ymm14[9,10,11,12,13,14],ymm12[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,2,3,0,1,14,15,12,13,26,27,26,27,26,27,26,27,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm14[2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm0[0],ymm3[1],ymm0[2,3],ymm3[4],ymm0[5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm18, %zmm0, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm0, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm31, %zmm0, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[1,3,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm12[0],xmm14[0],xmm12[1],xmm14[1],xmm12[2],xmm14[2],xmm12[3],xmm14[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,2,3,0,1,14,15,12,13,26,27,26,27,26,27,26,27,18,19,16,17,30,31,28,29]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm12[0,1],ymm7[2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm12[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0],ymm14[1],ymm12[2,3,4,5,6,7,8],ymm14[9],ymm12[10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm31, %zmm0, %zmm14
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm0, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm14[0],ymm12[1],ymm14[2,3,4,5,6,7,8],ymm12[9],ymm14[10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm8[0,1,2],ymm10[3],ymm8[4,5],ymm10[6],ymm8[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm14[0,1,2,3,6,4,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,5,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,4,5,2,3,0,1,14,15,28,29,28,29,28,29,28,29,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm12[2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0],ymm15[1],ymm11[2,3,4],ymm15[5],ymm11[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm12[0],xmm11[1],xmm12[2],xmm11[3],xmm12[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2],ymm10[3],ymm8[4,5],ymm10[6],ymm8[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm8[0,1,2,3,6,4,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,7,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm12[u,u,u,u,u,u,u,u,4,5,2,3,0,1,14,15,28,29,28,29,28,29,28,29,20,21,18,19,16,17,30,31]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1],ymm8[2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm11[0],ymm15[1],ymm11[2,3,4],ymm15[5],ymm11[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm11
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm11[0],xmm8[1],xmm11[2],xmm8[3],xmm11[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm0[2],ymm6[3,4],ymm0[5],ymm6[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[0,3,1,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm11
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[0,3,1,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,0,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm13[2],ymm0[3,4,5],ymm13[6],ymm0[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,3,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,5,10,11,u,u,u,u,u,u,u,u,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm6[1,2],ymm0[3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm11[u,u,u,u,u,u,6,7,4,5,2,3,0,1,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm8[u,u,u,u,u,u,6,7,4,5,2,3,0,1,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm0[0,1,2],ymm6[3,4,5,6,7],ymm0[8,9,10],ymm6[11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm22, %zmm6, %zmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 (%rsp), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm23
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm22, %zmm6, %zmm14
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm23
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm23, %zmm6, %zmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm24 # 64-byte Folded Reload
 ; AVX512F-ONLY-SLOW-NEXT:    movw $-512, %ax # imm = 0xFE00
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm3, %zmm24 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm26 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm1, %zmm26 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm21, %zmm6, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm9, %zmm2 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, (%rsi)
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm21, %zmm6, %zmm26
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm2, %zmm26 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm18, %zmm6, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm9, %zmm1 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm14, (%rsi)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, (%rdx)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm24, (%rcx)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, (%r8)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, (%r9)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, (%r9)
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm20, %zmm6, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm5, %zmm7 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm25, %zmm7 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, (%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm25, %zmm6, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm8 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    addq $72, %rsp
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm19, %zmm6, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm5 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, (%rax)
+; AVX512F-ONLY-SLOW-NEXT:    popq %rax
 ; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
 ; AVX512F-ONLY-SLOW-NEXT:    retq
 ;
 ; AVX512F-ONLY-FAST-LABEL: load_i16_stride7_vf32:
 ; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    pushq %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [2,6,9,13,2,6,9,13]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm30
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [2,6,9,13,2,6,9,13]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm25
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [2,5,9,12,2,5,9,12]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [10,3,6,15,12,13,6,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [3,6,10,13,3,6,10,13]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = <1,u,u,u,5,8,12,15>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <2,6,9,u,13,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm31, %zmm2, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm22, %zmm0, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <1,u,u,u,4,8,11,15>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm3, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <2,5,9,u,12,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm31, %zmm3, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm22, %zmm18, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <0,u,u,u,4,7,11,14>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm5, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [10,3,6,15,12,13,6,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [3,6,10,13,3,6,10,13]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = <1,u,u,u,5,8,12,15>
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <2,6,9,u,13,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm23, %zmm1, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <1,u,u,u,4,8,11,15>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm0, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <2,5,9,u,12,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm0, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm23, %zmm18, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <0,u,u,u,4,7,11,14>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm0, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [8,1,12,5,12,5,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm31, %zmm5, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm21, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm29
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm29[0,1,0,2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm12, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm5, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm22, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm28
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm28[0,1,0,2]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm6[0,1,2,3,4,5,6],ymm8[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[6,7,12,13,2,3,16,17,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -6127,277 +6122,279 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm13
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm13[4],xmm10[5],xmm13[6],xmm10[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vporq %ymm9, %ymm10, %ymm27
+; AVX512F-ONLY-FAST-NEXT:    vporq %ymm9, %ymm10, %ymm26
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 252(%rdi), %xmm9
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 224(%rdi), %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm10[u,u,u,u,u,u,u,u,0,1,14,15,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm10[u,u,u,u,u,u,u,u,0,1,14,15,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
 ; AVX512F-ONLY-FAST-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm9, %zmm8, %zmm27 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm9, %zmm8, %zmm26 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm8
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 288(%rdi), %ymm9
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm15[0,1,2],xmm13[3],xmm15[4],xmm13[5],xmm15[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm14[0,1,2],xmm13[3],xmm14[4],xmm13[5],xmm14[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,u,u,u,u,10,11,8,9,6,7,4,5,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm7[0,1,2],xmm13[3,4,5,6],xmm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm13[0,1,2,3],ymm7[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm0[0,1,2],xmm13[3,4,5,6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm14, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3,4,5],ymm14[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[0,1,6,7,8,9,18,19,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm14[0],xmm7[1],xmm14[2,3,4,5],xmm7[6],xmm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[2,3,0,1,14,15,12,13,10,11],zero,zero,zero,zero,zero,zero,zero,zero,ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm3, %ymm7, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 160(%rdi), %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm3[0,1],ymm15[2],ymm3[3,4,5],ymm15[6],ymm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm14[4],xmm7[5],xmm14[6],xmm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 240(%rdi), %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[0,1,0,1,14,15,12,13,10,11,8,9,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm26 = [0,1,2,15,0,1,2,15]
-; AVX512F-ONLY-FAST-NEXT:    # ymm26 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm12, %ymm26, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm14[0],xmm10[1],xmm14[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[2,3,0,1,14,15,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm7, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2],ymm8[3],ymm9[4,5],ymm8[6],ymm9[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm12[0,1,2,3],xmm7[4],xmm12[5],xmm7[6],xmm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm11[0,1,2],xmm7[3,4,5,6],xmm11[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm11[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3,4,5],ymm4[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm19
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,3,4,5,10,11,16,17,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm7[1],xmm4[2,3,4,5],xmm7[6],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[4,5,2,3,0,1,14,15,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm2, %ymm4, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm4[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[0,1,6,7,8,9,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2,3,4,5],xmm3[6],xmm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[2,3,0,1,14,15,12,13,10,11],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm3, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2],ymm15[3],ymm3[4,5],ymm15[6],ymm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0],xmm2[1],xmm4[2,3,4,5],xmm2[6],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[0,1,2,3,0,1,14,15,12,13,10,11,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 160(%rdi), %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm3[2],ymm4[3,4,5],ymm3[6],ymm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm14[4],xmm0[5],xmm14[6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 240(%rdi), %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,14,15,12,13,10,11,8,9,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm15[0],xmm10[1],xmm15[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[2,3,0,1,14,15,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2],ymm8[3],ymm9[4,5],ymm8[6],ymm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm11[0,1,2,3],xmm0[4],xmm11[5],xmm0[6],xmm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm12[2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm11[0,1,2],xmm0[3,4,5,6],xmm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm7[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,3,4,5,10,11,16,17],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm7[1],xmm2[2,3,4,5],xmm7[6],xmm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[4,5,2,3,0,1,14,15,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vporq %ymm0, %ymm2, %ymm31
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2],ymm3[3],ymm4[4,5],ymm3[6],ymm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2,3,4,5],xmm0[6],xmm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,0,1,14,15,12,13,10,11,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [21474836482,21474836482,21474836482,21474836482]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm29, %ymm2, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm28, %ymm2, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm2, %ymm26, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm2[8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1],ymm9[2],ymm8[3,4,5],ymm9[6],ymm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm7[4],xmm4[5],xmm7[6],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm23, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[0,1,2,3,0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1],ymm9[2],ymm8[3,4,5],ymm9[6],ymm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm7[4],xmm0[5],xmm7[6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm24, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,2,3,0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[2,3,16,17,22,23,24,25,30,31,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm7, %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm22, %zmm21, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm7, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm23, %zmm22, %zmm7
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm7, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm7[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2],xmm4[3],xmm7[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm31, %zmm16, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[4,5,10,11,0,1,22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[6,7,4,5,2,3,0,1,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vporq %ymm7, %ymm4, %ymm20
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm15[0],ymm3[1],ymm15[2,3],ymm3[4],ymm15[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm7[1],xmm4[2,3,4,5],xmm7[6],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[0,1,4,5,2,3,0,1,14,15,12,13,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm29[0,1,1,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm13, %ymm26, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm7[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm24
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0],xmm0[1],xmm7[2],xmm0[3],xmm7[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm19, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[4,5,10,11,0,1,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[6,7,4,5,2,3,0,1,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vporq %ymm7, %ymm0, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm7[1],xmm0[2,3,4,5],xmm7[6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,2,3,0,1,14,15,12,13,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm28[0,1,1,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm13[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[12,13,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 416(%rdi), %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdi), %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1],ymm4[2],ymm13[3,4,5],ymm4[6],ymm13[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm7, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 416(%rdi), %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdi), %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1],ymm13[2],ymm7[3,4,5],ymm13[6],ymm7[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm12
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm12[4],xmm2[5],xmm12[6],xmm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm8[0,1,2],ymm9[3],ymm8[4,5],ymm9[6],ymm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1],xmm11[2,3,4,5],xmm12[6],xmm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm14[0],xmm12[1],xmm14[2,3,4,5],xmm12[6],xmm14[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [2,11,2,11,12,5,8,9]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,0,1,14,15,12,13,10,11,8,9]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [2,11,2,11,12,5,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm12, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[0,1,22,23,28,29,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm12[0,1,2],ymm2[3,4,5,6,7],ymm12[8,9,10],ymm2[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm12, %ymm11, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm16
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm15[0],ymm3[1],ymm15[2,3,4],ymm3[5],ymm15[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm11[0],xmm3[1],xmm11[2],xmm3[3],xmm11[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm14, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[0,1,22,23,28,29,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm14[0,1,2],ymm2[3,4,5,6,7],ymm14[8,9,10],ymm2[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm14, %ymm12, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3,4],ymm4[5],ymm3[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2],xmm3[3],xmm4[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm7, %ymm26, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 232(%rdi), %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpsrlq $48, %xmm14, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm18, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5,6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 232(%rdi), %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpsrlq $48, %xmm15, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm18, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <0,3,7,10,14,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm3, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,u,u,0,3,7,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm29, %ymm15, %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0,1,2,3,4],ymm15[5,6,7],ymm11[8,9,10,11,12],ymm15[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpsrld $16, %xmm10, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm15 = xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm15, %zmm11, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm7, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[3,1,2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm15[0],xmm7[0],xmm15[1],xmm7[1],xmm15[2],xmm7[2],xmm15[3],xmm7[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm31, %zmm12, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm12, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm15[0,1],ymm12[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm12, %zmm15, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm4[3],ymm13[4,5],ymm4[6],ymm13[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm12[1],xmm2[2,3,4,5],xmm12[6],xmm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm3, %zmm0, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1],ymm5[2],ymm6[3,4],ymm5[5],ymm6[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <0,3,3,u,0,3,7,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm28, %ymm14, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,0,1,6,7,8,9,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[0,1,0,1,6,7,8,9,14,15,u,u,u,u,u,u,16,17,16,17,22,23,24,25,30,31,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm14[5,6,7],ymm4[8,9,10,11,12],ymm14[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpsrld $16, %xmm10, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm14, %zmm4, %zmm18
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm12, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm12, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm4[0,1],ymm12[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm12, %zmm4, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm7[0,1,2],ymm13[3],ymm7[4,5],ymm13[6],ymm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1],xmm11[2,3,4,5],xmm12[6],xmm11[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm12[0],xmm0[1],xmm12[2,3,4,5],xmm0[6],xmm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,2,3,0,1,14,15,12,13,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm12[0],xmm2[1],xmm12[2,3,4,5],xmm2[6],xmm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,2,3,0,1,14,15,12,13,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <2,u,u,u,6,9,13,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm12, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm12, %zmm12
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[2,3,16,17,22,23,24,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm12[0,1,2],ymm2[3,4,5,6,7],ymm12[8,9,10],ymm2[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm12, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm0, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm23, %zmm0, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [2,6,9,13,2,6,9,13]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm2, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm12[0,1,2],ymm11[3,4,5,6,7],ymm12[8,9,10],ymm11[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm2, %ymm12, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm2, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [2,6,9,13,2,6,9,13]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm1, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <0,4,7,11,14,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm31, %zmm16, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm1[0,1],ymm0[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0],ymm13[1],ymm4[2,3],ymm13[4],ymm4[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3,4,5],xmm1[6],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,4,7,0,0,4,7,0]
-; AVX512F-ONLY-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm29, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7],ymm2[8,9,10,11,12],ymm1[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <3,u,u,u,6,10,13,u>
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0],ymm8[1],ymm9[2,3,4],ymm8[5],ymm9[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm14[0],xmm1[1],xmm14[2],xmm1[3],xmm14[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm10, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm16, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[10,11,6,7,4,5,6,7,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm0[0,1],ymm11[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0],ymm7[1],ymm13[2,3],ymm7[4],ymm13[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm12[1],xmm0[2,3,4,5],xmm12[6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [0,4,7,0,0,4,7,0]
+; AVX512F-ONLY-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm28, %ymm12, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,2,3,4,5,10,11,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[4,5,2,3,4,5,10,11,12,13,u,u,u,u,u,u,20,21,18,19,20,21,26,27,28,29,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm12[5,6,7],ymm1[8,9,10,11,12],ymm12[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm10 = xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <3,u,u,u,6,10,13,u>
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm9[0],ymm8[1],ymm9[2,3,4],ymm8[5],ymm9[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm15[0],xmm10[1],xmm15[2],xmm10[3],xmm15[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm12, %zmm12
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,4,5,2,3,0,1,14,15,12,13]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[0,1,18,19,20,21,26,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4,5,6,7],ymm10[8,9,10],ymm0[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm1, %ymm10, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <1,4,8,11,15,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm31, %zmm1, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[0,1,18,19,20,21,26,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm12[0,1,2],ymm0[3,4,5,6,7],ymm12[8,9,10],ymm0[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm12, %ymm10, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <1,4,8,11,15,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm10, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3],ymm6[4,5],ymm5[6],ymm6[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u,u,10,11,6,7,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u,u,10,11,6,7,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,4,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1],ymm1[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm13[1],ymm4[2,3,4],ymm13[5],ymm4[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1],xmm5[2],xmm4[3],xmm5[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,1,10,3,14,7,10,3]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm9[0,1],ymm8[2],ymm9[3,4],ymm8[5],ymm9[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,3,1,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,8,9,4,5,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm5, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[4,5,10,11,u,u,u,u,u,u,u,u,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0],ymm6[1,2],ymm5[3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,6,7,4,5,2,3,0,1,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3,4,5,6,7],ymm5[8,9,10],ymm4[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0],ymm7[1],ymm13[2,3,4],ymm7[5],ymm13[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2],xmm5[3],xmm6[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,1,10,3,14,7,10,3]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1],ymm8[2],ymm9[3,4],ymm8[5],ymm9[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,3,1,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[8,9,8,9,4,5,6,7,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm6, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[4,5,10,11,u,u,u,u,u,u,u,u,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0],ymm7[1,2],ymm6[3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,6,7,4,5,2,3,0,1,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0,1,2],ymm5[3,4,5,6,7],ymm6[8,9,10],ymm5[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm27, %zmm5, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm28, %zmm5, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm17 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm26, %zmm6, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm27, %zmm6, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm31, %zmm4, %zmm17
 ; AVX512F-ONLY-FAST-NEXT:    movw $-512, %di # imm = 0xFE00
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %edi, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm23, %zmm17 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm20, %zmm15, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm26, %zmm21 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm24, %zmm17 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm20, %zmm4, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm2, %zmm22 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm25, %zmm3 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm18, %zmm15, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm11 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, (%rsi)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, (%rdx)
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm18, %zmm4, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm12, %zmm11 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, (%rsi)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, (%rdx)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, (%rcx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, (%r8)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, (%r8)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, (%r9)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, (%rax)
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm15, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm1 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    popq %rax
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm4, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm5, %zmm0 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
 ; AVX512DQ-SLOW-LABEL: load_i16_stride7_vf32:
 ; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm5
+; AVX512DQ-SLOW-NEXT:    pushq %rax
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm4
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0],ymm1[1],ymm4[2,3,4],ymm1[5],ymm4[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm4, %ymm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm9
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0],ymm1[1],ymm9[2,3,4],ymm1[5],ymm9[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm5
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[6,7,12,13,2,3,16,17,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1],ymm3[2],ymm5[3,4,5],ymm3[6],ymm5[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm3[2],ymm2[3,4,5],ymm3[6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm2, %ymm4
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[u,u,u,u,u,u,u,u,u,u,u,u]
@@ -6417,25 +6414,27 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastw 252(%rdi), %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm7
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm7[0,1,0,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm10
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm10[0,1,0,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm10, %xmm11
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
 ; AVX512DQ-SLOW-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
 ; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm1, %zmm21 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm13
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm2[2,3],ymm13[4,5],ymm2[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm2, %ymm12
+; AVX512DQ-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm3
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm12
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm2, %ymm10
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm14 = xmm2[0,1,2],xmm1[3],xmm2[4],xmm1[5],xmm2[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm11
+; AVX512DQ-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm13
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm1
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm10[0,1],ymm9[2],ymm10[3,4],ymm9[5],ymm10[6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm9[0,1],ymm5[2],ymm9[3,4],ymm5[5],ymm9[6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6,7,8,9,10],ymm1[11],ymm2[12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[8,9,6,7,4,5,18,19,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[8,9,6,7,4,5,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2],ymm7[3],ymm4[4,5],ymm7[6],ymm4[7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2,3,4,5],xmm2[6],xmm3[7]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[2,3,0,1,14,15,12,13,10,11],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -6449,19 +6448,20 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,1,3,4,5,5,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,5,6,8,9,10,11,12,13,13,14]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5,6],ymm0[7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm11[0],xmm7[1],xmm11[2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm13[0],xmm11[1],xmm13[2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[1,0,3,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1],ymm9[2,3],ymm10[4,5],ymm9[6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm5[2,3],ymm9[4,5],ymm5[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4],ymm0[5,6,7,8,9,10,11],ymm1[12],ymm0[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[10,11,8,9,6,7,20,21,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[10,11,8,9,6,7,20,21],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0],ymm4[1],ymm7[2,3],ymm4[4],ymm7[5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3,4,5],xmm2[6],xmm1[7]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[4,5,2,3,0,1,14,15,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vporq %ymm0, %ymm1, %ymm20
+; AVX512DQ-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2],ymm8[3],ymm6[4,5],ymm8[6],ymm6[7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5],xmm0[6],xmm1[7]
@@ -6471,17 +6471,18 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,1,0,3,4,5,4,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm11[0],xmm13[0],xmm11[1],xmm13[1],xmm11[2],xmm13[2],xmm11[3],xmm13[3]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm28
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm24
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2],ymm9[3],ymm10[4,5],ymm9[6],ymm10[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm26
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2],ymm5[3],ymm9[4,5],ymm5[6],ymm9[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm26
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[4,5,10,11,0,1,22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0],ymm5[1],ymm4[2,3,4],ymm5[5],ymm4[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm27
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[4,5,10,11,0,1,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0],ymm4[1],ymm7[2,3,4],ymm4[5],ymm7[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm7, %ymm27
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm4, %ymm5
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[6,7,4,5,2,3,0,1,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -6496,13 +6497,13 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastw 232(%rdi), %xmm1
-; AVX512DQ-SLOW-NEXT:    vpsrlq $48, %xmm11, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpsrlq $48, %xmm13, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm19
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm20
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1],ymm6[2],ymm8[3,4],ymm6[5],ymm8[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3],xmm0[4],xmm1[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,8,9,6,7,4,5,2,3,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,6,7,4,5,2,3,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm2
@@ -6513,56 +6514,59 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,7,6]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7],ymm0[8,9,10,11,12],ymm1[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpsrld $16, %xmm7, %xmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm7, %xmm31
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm11, %xmm16
+; AVX512DQ-SLOW-NEXT:    vpsrld $16, %xmm11, %xmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm11, %xmm31
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm13[4],xmm1[5],xmm13[5],xmm1[6],xmm13[6],xmm1[7],xmm13[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm13, %xmm19
 ; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm23
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm1
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2],ymm15[3],ymm1[4,5],ymm15[6],ymm1[7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm2[6],ymm0[7,8,9,10,11,12,13],ymm2[14],ymm0[15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm14[0,2,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm13
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2],ymm15[3],ymm13[4,5],ymm15[6],ymm13[7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm0[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6],ymm0[7,8,9,10,11,12,13],ymm3[14],ymm0[15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm14[0,2,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,14,15,12,13,10,11,4,5,6,7,8,9,2,3,16,17,30,31,28,29,26,27,20,21,22,23,24,25,18,19]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm0[0,1,2],xmm2[3,4,5,6],xmm0[7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0,1,2],xmm3[3,4,5,6],xmm0[7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm14
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 416(%rdi), %ymm11
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0,1],ymm14[2],ymm11[3,4],ymm14[5],ymm11[6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm11[0,1],ymm14[2],ymm11[3,4],ymm14[5],ymm11[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,3,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm0[0,1,2,3,4,5],ymm2[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm12, %ymm7
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2],ymm12[3],ymm13[4,5],ymm12[6],ymm13[7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm15[0],ymm1[1],ymm15[2,3],ymm1[4],ymm15[5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm2[2,3,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm12[0],ymm2[1,2,3,4,5,6],ymm12[7,8],ymm2[9,10,11,12,13,14],ymm12[15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm0[0,1,2,3,4,5],ymm3[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm12, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2],ymm10[3],ymm12[4,5],ymm10[6],ymm12[7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4],xmm3[5],xmm0[6],xmm3[7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm15[0],ymm13[1],ymm15[2,3],ymm13[4],ymm15[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm3[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm12[0],ymm3[1,2,3,4,5,6],ymm12[7,8],ymm3[9,10,11,12,13,14],ymm12[15]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[2,3,0,1,14,15,12,13,4,5,4,5,4,5,4,5,18,19,16,17,30,31,28,29,20,21,20,21,20,21,20,21]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3,4,5,6],xmm2[7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm11[0,1],ymm14[2,3],ymm11[4,5],ymm14[6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm12
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[2,3,0,1,14,15,12,13,4,5,4,5,4,5,4,5,18,19,16,17,30,31,28,29,20,21,20,21,20,21,20,21]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[3,4,5,6],xmm3[7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm11[0,1],ymm14[2,3],ymm11[4,5],ymm14[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm12
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[0,1,0,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],ymm2[6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0],ymm1[1],ymm15[2,3,4],ymm1[5],ymm15[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,1,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm12[0],xmm3[1],xmm12[1],xmm3[2],xmm12[2],xmm3[3],xmm12[3]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm16
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0],ymm13[1],ymm15[2,3,4],ymm13[5],ymm15[6,7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm0[2,3,0,1]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,7,20,21,u,u,16,17,30,31,u,u,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm12[1],ymm0[2,3,4,5,6,7,8],ymm12[9],ymm0[10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm7[0,1],ymm13[2],ymm7[3,4,5],ymm13[6],ymm7[7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm10[0,1],ymm1[2],ymm10[3,4,5],ymm1[6],ymm10[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm3
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm10
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm12[0,1,2,3],xmm10[4],xmm12[5],xmm10[6],xmm12[7]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[0,1,2,3,0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[u,u,u,u,u,u,u,u]
@@ -6576,7 +6580,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3,4,5],ymm0[6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm20, %zmm25, %zmm24
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm24 # 64-byte Folded Reload
 ; AVX512DQ-SLOW-NEXT:    movw $-512, %ax # imm = 0xFE00
 ; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm24 {%k1}
@@ -6589,18 +6593,18 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm17[0,1,2,0,4,5,6,4]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5,7,4,8,9,10,11,12,13,15,12]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm10[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm2[0,1,2,3,6,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm1[0,1,2,3,6,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,2,2,2]
 ; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm10, %zmm0, %zmm17
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2],ymm13[3],ymm7[4,5],ymm13[6],ymm7[7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2],ymm3[3],ymm7[4,5],ymm3[6],ymm7[7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm10
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm10[0],xmm0[1],xmm10[2,3,4,5],xmm0[6],xmm10[7]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm14[0,1],ymm11[2],ymm14[3,4,5],ymm11[6],ymm14[7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm12
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm12[4],xmm10[5],xmm12[6],xmm10[7]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm15[0,1],ymm1[2],ymm15[3,4],ymm1[5],ymm15[6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm15[0,1],ymm13[2],ymm15[3,4],ymm13[5],ymm15[6,7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[1,1,2,0]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[0,1,22,23,28,29,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512DQ-SLOW-NEXT:    vpor %ymm0, %ymm12, %ymm0
@@ -6611,15 +6615,15 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm10[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm18, %zmm25, %zmm17
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm17 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm0[2],ymm9[3,4,5],ymm0[6],ymm9[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm20
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm9[2],ymm2[3,4,5],ymm9[6],ymm2[7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm0[2,3,0,1]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,0,1,14,15,u,u,10,11,24,25,24,25,24,25,24,25,16,17,30,31,u,u,26,27]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm10[6],ymm0[7,8,9,10,11,12,13],ymm10[14],ymm0[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm2[0,1],ymm5[2],ymm2[3,4],ymm5[5],ymm2[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm27
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm9
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm9[0,1],ymm5[2],ymm9[3,4],ymm5[5],ymm9[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm18
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm12
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[3,1,2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,1,2,3]
@@ -6629,10 +6633,10 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm14[0,1,2],ymm11[3],ymm14[4,5],ymm11[6],ymm14[7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm12
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm12[0],xmm10[1],xmm12[2,3,4,5],xmm10[6],xmm12[7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm15[0,1],ymm1[2,3],ymm15[4,5],ymm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm15[0,1],ymm13[2,3],ymm15[4,5],ymm13[6,7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm15[0,1,0,1]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm12[0,1,2],ymm9[3],ymm12[4,5,6,7,8,9,10],ymm9[11],ymm12[12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0],ymm7[1],ymm13[2,3],ymm7[4],ymm13[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm3[0],ymm7[1],ymm3[2,3],ymm7[4],ymm3[5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm5
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm12[0],xmm5[1],xmm12[2,3,4,5],xmm5[6],xmm12[7]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[u,u,u,u,u,u,2,3,0,1,14,15,12,13,10,11]
@@ -6642,15 +6646,17 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
 ; AVX512DQ-SLOW-NEXT:    vpor %ymm5, %ymm9, %ymm5
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm10[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm19, %zmm25, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm20, %zmm25, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm5, %zmm0, %zmm0 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0],ymm14[1],ymm11[2,3],ymm14[4],ymm11[5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm9
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm9[1],xmm5[2,3,4,5],xmm9[6],xmm5[7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm15[0,1,2],ymm1[3],ymm15[4,5],ymm1[6],ymm15[7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm15[0,1,2],ymm13[3],ymm15[4,5],ymm13[6],ymm15[7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm9[2,3,0,1]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4],ymm9[5,6,7,8,9,10,11],ymm10[12],ymm9[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm13[0],ymm7[1],ymm13[2,3,4],ymm7[5],ymm13[6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm3[0],ymm7[1],ymm3[2,3,4],ymm7[5],ymm3[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm20
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm7, %ymm26
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm12
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm12[0],xmm10[1],xmm12[2],xmm10[3],xmm12[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,4,5,2,3,0,1,14,15,12,13]
@@ -6659,64 +6665,67 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm9[0,1,2],ymm5[3,4,5,6,7],ymm9[8,9,10],ymm5[11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
 ; AVX512DQ-SLOW-NEXT:    vpor %ymm9, %ymm10, %ymm9
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm19
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm2, %ymm12
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm9[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm10[0],ymm9[1,2,3,4,5,6],ymm10[7,8],ymm9[9,10,11,12,13,14],ymm10[15]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1],ymm6[2,3],ymm8[4,5],ymm6[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm8
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm8[0,1,2],xmm6[3],xmm8[4],xmm6[5],xmm8[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm3
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm8
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm8[1],xmm3[2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,10,11,8,9,6,7,4,5,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm8[1],xmm2[2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[8,9,10,11,8,9,6,7,4,5,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,7]
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm6[0,1,2,3,4],ymm3[5,6,7],ymm6[8,9,10,11,12],ymm3[13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm6[0,1,2,3,4],ymm2[5,6,7],ymm6[8,9,10,11,12],ymm2[13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm8
 ; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm8
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,3,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm3, %zmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm3
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm9
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,2,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm2, %zmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm10
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1],ymm10[2,3],ymm7[4,5],ymm10[6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm8
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,2,2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[1,3,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm12
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm12[0,1,2],ymm10[3],ymm12[4,5],ymm10[6],ymm12[7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm8[0],ymm9[1,2,3,4,5,6],ymm8[7,8],ymm9[9,10,11,12,13,14],ymm8[15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,u,u,u,u,2,3,0,1,14,15,12,13,26,27,26,27,26,27,26,27,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm9[2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm9[u,u,u,u,u,u,u,u,2,3,0,1,14,15,12,13,26,27,26,27,26,27,26,27,18,19,16,17,30,31,28,29]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1],ymm8[2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm23, %zmm25, %zmm6
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm5, %zmm0, %zmm6 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm10[0],ymm12[1],ymm10[2,3],ymm12[4],ymm10[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0],ymm12[1],ymm1[2,3],ymm12[4],ymm1[5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm5[2,3,0,1]
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm8[1],ymm5[2,3,4,5,6,7,8],ymm8[9],ymm5[10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm7[0,1,2],ymm10[3],ymm7[4,5],ymm10[6],ymm7[7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm8[0,1,2,3,6,4,6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm8
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,1,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,5,7,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,4,5,2,3,0,1,14,15,28,29,28,29,28,29,28,29,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,4,5,2,3,0,1,14,15,28,29,28,29,28,29,28,29,20,21,18,19,16,17,30,31]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1],ymm5[2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm11[0],ymm14[1],ymm11[2,3,4],ymm14[5],ymm11[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2],xmm8[3],xmm9[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm13[0,1],ymm7[2],ymm13[3,4],ymm7[5],ymm13[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm7
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm1[0,1],ymm7[2],ymm1[3,4],ymm7[5],ymm1[6,7]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm9
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,3,1,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,0,2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm15[2],ymm1[3,4,5],ymm15[6],ymm1[7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm13[0,1],ymm15[2],ymm13[3,4,5],ymm15[6],ymm13[7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,3,1]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[4,5,10,11,u,u,u,u,u,u,u,u,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm7[1,2],ymm1[3,4,5,6,7]
@@ -6724,15 +6733,15 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
 ; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm1[0,1,2],ymm7[3,4,5,6,7],ymm1[8,9,10],ymm7[11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm18, %zmm25, %zmm5
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm25, %zmm5
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm5 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm21, %zmm1, %zmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm22
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm22, %zmm1, %zmm19
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm22, %zmm1, %zmm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, (%rsi)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm19, (%rdx)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, (%rdx)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm24, (%rcx)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm17, (%r8)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, (%r9)
@@ -6740,15 +6749,16 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, (%rax)
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, (%rax)
+; AVX512DQ-SLOW-NEXT:    popq %rax
 ; AVX512DQ-SLOW-NEXT:    vzeroupper
 ; AVX512DQ-SLOW-NEXT:    retq
 ;
 ; AVX512DQ-FAST-LABEL: load_i16_stride7_vf32:
 ; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm29
+; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm28
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm30
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm31 = [2,6,9,13,2,6,9,13]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm25
+; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm24
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [2,5,9,12,2,5,9,12]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [10,3,6,15,12,13,6,15]
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm2, %zmm0
@@ -6760,17 +6770,17 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm2, %zmm0
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm20, %zmm31, %zmm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <1,u,u,u,4,8,11,15>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm29, %zmm2, %zmm10
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm2, %zmm10
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <2,5,9,u,12,u,u,u>
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm2, %zmm2
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm20, %zmm19, %zmm13
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,u,u,u,4,7,11,14>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm29, %zmm3, %zmm14
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm3, %zmm14
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [8,1,12,5,12,5,14,15]
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm3, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermd %zmm25, %zmm18, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm28
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm28[0,1,0,2]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm24, %zmm18, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm27
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm27[0,1,0,2]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm4[0,1,2,3,4,5,6],ymm8[7]
@@ -6784,7 +6794,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vporq %ymm9, %ymm12, %ymm23
 ; AVX512DQ-FAST-NEXT:    vpbroadcastw 252(%rdi), %xmm9
 ; AVX512DQ-FAST-NEXT:    vmovdqa 224(%rdi), %xmm12
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm12[u,u,u,u,u,u,u,u,0,1,14,15,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm12[u,u,u,u,u,u,u,u,0,1,14,15,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm15[2],xmm9[2],xmm15[3],xmm9[3]
 ; AVX512DQ-FAST-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
@@ -6801,8 +6811,8 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31>
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm13, %ymm13
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm13[6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm26
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[0,1,6,7,8,9,18,19,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm25
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[0,1,6,7,8,9,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm3[0,1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm13
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm13[0],xmm11[1],xmm13[2,3,4,5],xmm11[6],xmm13[7]
@@ -6815,13 +6825,12 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3],xmm14[4],xmm11[5],xmm14[6],xmm11[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 240(%rdi), %xmm15
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[0,1,0,1,14,15,12,13,10,11,8,9,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29]
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm24 = [0,1,2,15,0,1,2,15]
-; AVX512DQ-FAST-NEXT:    # ymm24 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm5, %ymm24, %ymm11
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm15[0],xmm12[1],xmm15[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[2,3,0,1,14,15,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm11, %zmm27
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0,1,2,3,4,5,6],ymm5[7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm15[0],xmm12[1],xmm15[2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[2,3,0,1,14,15,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm5, %zmm26
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm9[0,1,2],ymm8[3],ymm9[4,5],ymm8[6],ymm9[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm11
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm11[0,1,2,3],xmm5[4],xmm11[5],xmm5[6],xmm11[7]
@@ -6831,7 +6840,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm10[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm6[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[2,3,4,5,10,11,16,17,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[2,3,4,5,10,11,16,17],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm11
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm11[1],xmm10[2,3,4,5],xmm11[6],xmm10[7]
@@ -6840,11 +6849,12 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm2[0,1,2],ymm13[3],ymm2[4,5],ymm13[6],ymm2[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm11
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm11[0],xmm6[1],xmm11[2,3,4,5],xmm6[6],xmm11[7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm6[0,1,2,3,0,1,14,15,12,13,10,11,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [21474836482,21474836482,21474836482,21474836482]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm28, %ymm6, %ymm6
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm6, %ymm24, %ymm11
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[0,1,2,3,0,1,14,15,12,13,10,11,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm11 = [21474836482,21474836482,21474836482,21474836482]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm27, %ymm11, %ymm11
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm6[0,1,2,3,4,5,6],ymm11[7]
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm12[0],xmm15[0],xmm12[1],xmm15[1],xmm12[2],xmm15[2],xmm12[3],xmm15[3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm6, %xmm14
@@ -6853,7 +6863,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm8[0,1],ymm9[2],ymm8[3,4,5],ymm9[6],ymm8[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm14
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2,3],xmm14[4],xmm11[5],xmm14[6],xmm11[7]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm29, %zmm21, %zmm14
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm21, %zmm14
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[0,1,2,3,0,1,14,15,12,13,10,11,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm11[u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[2,3,16,17,22,23,24,25,30,31,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpor %ymm14, %ymm11, %ymm11
@@ -6866,19 +6876,20 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm10, %zmm0, %zmm17 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[4,5,10,11,0,1,22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[4,5,10,11,0,1,22,23],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm4[0],ymm3[1],ymm4[2,3,4],ymm3[5],ymm4[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm14
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm14[0],xmm11[1],xmm14[2],xmm11[3],xmm14[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[6,7,4,5,2,3,0,1,14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vporq %ymm10, %ymm11, %ymm20
+; AVX512DQ-FAST-NEXT:    vporq %ymm10, %ymm11, %ymm21
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm13[0],ymm2[1],ymm13[2,3],ymm2[4],ymm13[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm11
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm11[1],xmm10[2,3,4,5],xmm11[6],xmm10[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[0,1,4,5,2,3,0,1,14,15,12,13,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm28[0,1,1,3]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm27[0,1,1,3]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm11, %ymm24, %ymm10
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5,6],ymm11[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[12,13,10,11,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm10, %zmm11
 ; AVX512DQ-FAST-NEXT:    vmovdqa 416(%rdi), %ymm6
@@ -6890,10 +6901,10 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm7[1],xmm0[2,3,4,5],xmm7[6],xmm0[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <0,3,7,10,14,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm20, %zmm18, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [2,11,2,11,12,5,8,9]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [2,11,2,11,12,5,8,9]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm20
-; AVX512DQ-FAST-NEXT:    vpermd %zmm29, %zmm21, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm29, %zmm1
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm21, %zmm18, %zmm11
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,0,1,14,15,12,13,10,11,8,9]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[0,1,22,23,28,29,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -6917,8 +6928,9 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,6,7,4,5,2,3,0,1,14,15,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm2, %ymm24, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm2[7]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastw 232(%rdi), %xmm2
 ; AVX512DQ-FAST-NEXT:    vpsrlq $48, %xmm15, %xmm7
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3]
@@ -6931,22 +6943,22 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm13
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm13[1],xmm1[2,3,4,5],xmm13[6],xmm1[7]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm29, %zmm7, %zmm7
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm7, %zmm7
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,2,3,0,1,14,15,12,13,10,11]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[2,3,16,17,22,23,24,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm7[0,1,2],ymm0[3,4,5,6,7],ymm7[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
 ; AVX512DQ-FAST-NEXT:    vpor %ymm7, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vpermd %zmm25, %zmm31, %zmm14
+; AVX512DQ-FAST-NEXT:    vpermd %zmm24, %zmm31, %zmm14
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <0,4,7,11,14,u,u,u>
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm25, %zmm19, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermd %zmm24, %zmm19, %zmm1
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm2 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,0,3,7,u>
-; AVX512DQ-FAST-NEXT:    vpermd %ymm28, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <0,3,3,u,0,3,7,u>
+; AVX512DQ-FAST-NEXT:    vpermd %ymm27, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,0,1,6,7,8,9,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,0,1,6,7,8,9,14,15,u,u,u,u,u,u,16,17,16,17,22,23,24,25,30,31,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpsrld $16, %xmm12, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
@@ -6955,18 +6967,18 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm13
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm13, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm7, %zmm7
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[10,11,6,7,4,5,6,7,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm7[u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm7[u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm1[0,1],ymm5[2,3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0],ymm10[1],ymm6[2,3],ymm10[4],ymm6[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm5[1],xmm1[2,3,4,5],xmm5[6],xmm1[7]
 ; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [0,4,7,0,0,4,7,0]
 ; AVX512DQ-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm28, %ymm5, %ymm5
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm27, %ymm5, %ymm5
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,2,3,4,5,10,11,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm14[4,5,2,3,4,5,10,11,12,13,u,u,u,u,u,u,20,21,18,19,20,21,26,27,28,29,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm7[0,1,2,3,4],ymm5[5,6,7],ymm7[8,9,10,11,12],ymm5[13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm12[4],xmm15[4],xmm12[5],xmm15[5],xmm12[6],xmm15[6],xmm12[7],xmm15[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm16, %xmm12
@@ -6976,7 +6988,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0],ymm8[1],ymm9[2,3,4],ymm8[5],ymm9[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm14
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm14[0],xmm7[1],xmm14[2],xmm7[3],xmm14[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm29, %zmm12, %zmm12
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm12, %zmm12
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,4,5,2,3,0,1,14,15,12,13]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[0,1,18,19,20,21,26,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -6992,7 +7004,7 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2],ymm3[3],ymm4[4,5],ymm3[6],ymm4[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,10,11,6,7,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,10,11,6,7,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,4,6,7]
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
@@ -7003,9 +7015,9 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm9[0,1],ymm8[2],ymm9[3,4],ymm8[5],ymm9[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,3,1,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,8,9,4,5,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[8,9,8,9,4,5,6,7,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm29, %zmm3, %zmm3
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm3, %zmm3
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[4,5,10,11,u,u,u,u,u,u,u,u,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm4[1,2],ymm3[3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,6,7,4,5,2,3,0,1,14,15]
@@ -7016,10 +7028,10 @@ define void @load_i16_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm0 {%k1}
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm23, %zmm1, %zmm26
-; AVX512DQ-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm22, %zmm27
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm27, %zmm1, %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm26, (%rsi)
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm23, %zmm1, %zmm25
+; AVX512DQ-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm22, %zmm26
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm26, %zmm1, %zmm20
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, (%rsi)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, (%rdx)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, (%rcx)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, (%r8)
@@ -8786,9 +8798,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpsrld $16, %xmm0, %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa 160(%rdi), %xmm14
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm14[2,2,3,3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 160(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; AVX1-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -8805,18 +8817,18 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5],xmm1[6,7]
-; AVX1-ONLY-NEXT:    vmovdqa 80(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vmovdqa 80(%rdi), %xmm14
 ; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm2
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
-; AVX1-ONLY-NEXT:    vmovdqa 112(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm6[0,3,2,3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 112(%rdi), %xmm10
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm10[0,3,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
 ; AVX1-ONLY-NEXT:    vmovdqa (%rdi), %xmm3
@@ -8829,10 +8841,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
 ; AVX1-ONLY-NEXT:    vmovaps 32(%rdi), %xmm3
 ; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 48(%rdi), %xmm11
-; AVX1-ONLY-NEXT:    vinsertps {{.*#+}} xmm3 = zero,xmm3[2],xmm11[2],zero
-; AVX1-ONLY-NEXT:    vmovaps %xmm11, %xmm10
-; AVX1-ONLY-NEXT:    vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 48(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vinsertps {{.*#+}} xmm3 = zero,xmm3[2],xmm7[2],zero
+; AVX1-ONLY-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0,1,2],xmm3[3,4],xmm0[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm5 = [65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
 ; AVX1-ONLY-NEXT:    vandnps %ymm2, %ymm5, %ymm2
@@ -8873,9 +8884,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
 ; AVX1-ONLY-NEXT:    vmovaps 480(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 496(%rdi), %xmm11
-; AVX1-ONLY-NEXT:    vinsertps {{.*#+}} xmm3 = zero,xmm0[2],xmm11[2],zero
-; AVX1-ONLY-NEXT:    vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 496(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertps {{.*#+}} xmm3 = zero,xmm0[2],xmm3[2],zero
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3,4],xmm2[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa 528(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -8927,24 +8938,24 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,2,3,3]
 ; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX1-ONLY-NEXT:    vmovaps 256(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 272(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertps {{.*#+}} xmm3 = zero,xmm0[2],xmm3[2],zero
+; AVX1-ONLY-NEXT:    vmovaps 256(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vmovaps 272(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vinsertps {{.*#+}} xmm3 = zero,xmm6[2],xmm11[2],zero
+; AVX1-ONLY-NEXT:    vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3,4],xmm2[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa 304(%rdi), %xmm3
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 288(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; AVX1-ONLY-NEXT:    vmovdqa 320(%rdi), %xmm15
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm15[0,0,0,0]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 320(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[0,0,0,0]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,6],xmm4[7]
-; AVX1-ONLY-NEXT:    vmovdqa 336(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm12[0,3,2,3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 336(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
 ; AVX1-ONLY-NEXT:    vandps %ymm5, %ymm2, %ymm2
@@ -8953,12 +8964,12 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 848(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vpsrld $16, %xmm8, %xmm1
-; AVX1-ONLY-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 832(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; AVX1-ONLY-NEXT:    vmovdqa 848(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vpsrld $16, %xmm15, %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 832(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm12[2,2,3,3]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
 ; AVX1-ONLY-NEXT:    vmovdqa 800(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -8979,15 +8990,15 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa 688(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,2,3,3]
+; AVX1-ONLY-NEXT:    vmovdqa 688(%rdi), %xmm13
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm13[2,2,3,3]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX1-ONLY-NEXT:    vmovaps 704(%rdi), %xmm13
-; AVX1-ONLY-NEXT:    vmovaps 720(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vinsertps {{.*#+}} xmm3 = zero,xmm13[2],xmm9[2],zero
+; AVX1-ONLY-NEXT:    vmovaps 704(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vmovaps 720(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vinsertps {{.*#+}} xmm3 = zero,xmm9[2],xmm8[2],zero
+; AVX1-ONLY-NEXT:    vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3,4],xmm2[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa 752(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -9009,8 +9020,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm0 = xmm14[4],mem[4],xmm14[5],mem[5],xmm14[6],mem[6],xmm14[7],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
@@ -9024,23 +9036,23 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
-; AVX1-ONLY-NEXT:    vpsrld $16, %xmm7, %xmm1
+; AVX1-ONLY-NEXT:    vpsrld $16, %xmm14, %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
-; AVX1-ONLY-NEXT:    vpsrld $16, %xmm6, %xmm2
+; AVX1-ONLY-NEXT:    vpsrld $16, %xmm10, %xmm2
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm2
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm10[4],xmm1[5],xmm10[5],xmm1[6],xmm10[6],xmm1[7],xmm10[7]
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,9,8,9,8,9,8,9,6,7,6,7,6,7,6,7]
 ; AVX1-ONLY-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm3, %xmm4
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm6[1],xmm7[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw $253, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm3 = mem[0],xmm7[1],mem[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[1,0,3,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm1[3,4],xmm3[5,6,7]
@@ -9052,11 +9064,11 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm10 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0]
 ; AVX1-ONLY-NEXT:    vandnps %ymm3, %ymm10, %ymm3
 ; AVX1-ONLY-NEXT:    vandps %ymm2, %ymm10, %ymm2
-; AVX1-ONLY-NEXT:    vorps %ymm3, %ymm2, %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX1-ONLY-NEXT:    vorps %ymm3, %ymm2, %ymm0
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
@@ -9065,18 +9077,19 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,0,3,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpslld $16, %xmm1, %xmm3
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpslld $16, %xmm0, %xmm3
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm3[6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm11[4],xmm0[5],xmm11[5],xmm0[6],xmm11[6],xmm0[7],xmm11[7]
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm3 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm4, %xmm1
 ; AVX1-ONLY-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
-; AVX1-ONLY-NEXT:    vmovdqa %xmm4, %xmm11
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = xmm1[0],mem[1],xmm1[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = xmm0[0],mem[1],xmm0[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,0,3,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3,4],xmm4[5,6,7]
@@ -9085,11 +9098,11 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6],xmm5[7]
-; AVX1-ONLY-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrld $16, %xmm1, %xmm5
+; AVX1-ONLY-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrld $16, %xmm0, %xmm5
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
 ; AVX1-ONLY-NEXT:    vandps %ymm3, %ymm14, %ymm3
 ; AVX1-ONLY-NEXT:    vandnps %ymm4, %ymm14, %ymm4
@@ -9097,11 +9110,11 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
 ; AVX1-ONLY-NEXT:    vandnps %ymm2, %ymm10, %ymm2
 ; AVX1-ONLY-NEXT:    vandps %ymm3, %ymm10, %ymm3
-; AVX1-ONLY-NEXT:    vorps %ymm2, %ymm3, %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX1-ONLY-NEXT:    vorps %ymm2, %ymm3, %ymm0
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
@@ -9115,25 +9128,25 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm3[6,7]
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
+; AVX1-ONLY-NEXT:    vpshufb %xmm1, %xmm3, %xmm3
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, %xmm6
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm3 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
-; AVX1-ONLY-NEXT:    vpshufb %xmm11, %xmm3, %xmm3
-; AVX1-ONLY-NEXT:    vmovdqa %xmm11, %xmm0
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = xmm1[0],mem[1],xmm1[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = xmm0[0],mem[1],xmm0[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,0,3,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3,4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrld $16, %xmm1, %xmm4
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrld $16, %xmm0, %xmm4
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; AVX1-ONLY-NEXT:    vpslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm15[0,1,2,3,4,5]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6],xmm5[7]
-; AVX1-ONLY-NEXT:    vpsrld $16, %xmm12, %xmm5
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrld $16, %xmm0, %xmm5
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
 ; AVX1-ONLY-NEXT:    vandps %ymm3, %ymm14, %ymm3
 ; AVX1-ONLY-NEXT:    vandnps %ymm4, %ymm14, %ymm4
@@ -9141,28 +9154,26 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
 ; AVX1-ONLY-NEXT:    vandnps %ymm2, %ymm10, %ymm2
 ; AVX1-ONLY-NEXT:    vandps %ymm3, %ymm10, %ymm3
-; AVX1-ONLY-NEXT:    vorps %ymm2, %ymm3, %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm11[4],xmm8[4],xmm11[5],xmm8[5],xmm11[6],xmm8[6],xmm11[7],xmm8[7]
+; AVX1-ONLY-NEXT:    vorps %ymm2, %ymm3, %ymm0
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm12[4],xmm15[4],xmm12[5],xmm15[5],xmm12[6],xmm15[6],xmm12[7],xmm15[7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm15[0,1,2,3,4,5],xmm1[6],xmm15[7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm11[0,1,2,3,4,5],xmm15[6],xmm11[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,0,3,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpslld $16, %xmm8, %xmm3
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpslld $16, %xmm1, %xmm3
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm3[6,7]
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm13[4],xmm9[4],xmm13[5],xmm9[5],xmm13[6],xmm9[6],xmm13[7],xmm9[7]
-; AVX1-ONLY-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; AVX1-ONLY-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = xmm12[0],mem[1],xmm12[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm13[0],xmm12[1],xmm13[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[1,0,3,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3,4],xmm4[5,6,7]
@@ -9171,11 +9182,11 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm9[0,1,2,3,4,5]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1,2,3,4,5]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6],xmm5[7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrld $16, %xmm0, %xmm5
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrld $16, %xmm9, %xmm5
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
 ; AVX1-ONLY-NEXT:    vandps %ymm3, %ymm14, %ymm3
 ; AVX1-ONLY-NEXT:    vandnps %ymm4, %ymm14, %ymm4
@@ -9216,7 +9227,8 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm5 = mem[2,2,3,3]
 ; AVX1-ONLY-NEXT:    vpunpckhqdq {{.*#+}} xmm5 = xmm3[1],xmm5[1]
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX1-ONLY-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm3 = xmm7[0],mem[0],xmm7[1],mem[1],xmm7[2],mem[2],xmm7[3],mem[3]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3,4,5,6,7]
@@ -9276,14 +9288,14 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpsllq $16, %xmm2, %xmm2
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm11[4],xmm2[4],xmm11[5],xmm2[5],xmm11[6],xmm2[6],xmm11[7],xmm2[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[0,3,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm15[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,0,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm15[0,1],xmm4[2,3],xmm15[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm11[0,1],xmm4[2,3],xmm11[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3]
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm4[6,7]
 ; AVX1-ONLY-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm4 = mem[0,1,0,3]
@@ -9291,18 +9303,18 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm5 = mem[2,2,3,3]
 ; AVX1-ONLY-NEXT:    vpunpckhqdq {{.*#+}} xmm4 = xmm4[1],xmm5[1]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,1,2,3]
+; AVX1-ONLY-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm0 = xmm12[0],mem[0],xmm12[1],mem[1],xmm12[2],mem[2],xmm12[3],mem[3]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm0[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm13[2,2,2,2]
 ; AVX1-ONLY-NEXT:    vpblendw $63, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm5 = mem[0,1,2,3,4,5],xmm5[6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm9[0,1,0,1]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm8[0,1,0,1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,6],xmm7[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm9[1,1,1,1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
 ; AVX1-ONLY-NEXT:    vandps %ymm4, %ymm14, %ymm4
 ; AVX1-ONLY-NEXT:    vandnps %ymm5, %ymm14, %ymm5
@@ -9310,17 +9322,17 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
 ; AVX1-ONLY-NEXT:    vandnps %ymm2, %ymm10, %ymm2
 ; AVX1-ONLY-NEXT:    vandps %ymm4, %ymm10, %ymm4
-; AVX1-ONLY-NEXT:    vorps %ymm2, %ymm4, %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vorps %ymm2, %ymm4, %ymm0
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpsllq $16, %xmm0, %xmm2
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm9[0,3,2,3]
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,0,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm8[0,1],xmm4[2,3],xmm8[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm8[0,3,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,0,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm0[0,1],xmm4[2,3],xmm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
@@ -9330,17 +9342,17 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm2 = mem[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[2,2,3,3]
+; AVX1-ONLY-NEXT:    vpshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = mem[2,2,3,3]
 ; AVX1-ONLY-NEXT:    vpunpckhqdq {{.*#+}} xmm5 = xmm2[1],xmm5[1]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[2,2,2,2]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm9[2,2,2,2]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm7 = xmm12[0,1,2,3,4,5],xmm7[6,7]
 ; AVX1-ONLY-NEXT:    vpshufd $68, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
@@ -9355,24 +9367,24 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
 ; AVX1-ONLY-NEXT:    vandnps %ymm4, %ymm10, %ymm4
 ; AVX1-ONLY-NEXT:    vandps %ymm5, %ymm10, %ymm5
-; AVX1-ONLY-NEXT:    vorps %ymm4, %ymm5, %ymm4
-; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = xmm4[0],mem[0],xmm4[1],mem[1],xmm4[2],mem[2],xmm4[3],mem[3]
+; AVX1-ONLY-NEXT:    vorps %ymm4, %ymm5, %ymm1
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,0,3,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $191, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = mem[0,1,2,3,4,5],xmm5[6],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw $64, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = xmm1[0,1,2,3,4,5],mem[6],xmm1[7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,0,0,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,6,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1,2],xmm5[3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[2,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm1[2,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,0,0,0]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm4[0,1,2,3,4,5],xmm5[6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,6,7]
@@ -9383,17 +9395,17 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,1,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,7,7,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,7,7]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm15 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm15 = xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm4 = [4,5,2,3,4,5,6,7,8,9,4,5,8,9,2,3]
 ; AVX1-ONLY-NEXT:    vpshufb %xmm4, %xmm15, %xmm15
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm13, %xmm13
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm1, %xmm13
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm13, %ymm15, %ymm13
 ; AVX1-ONLY-NEXT:    vandps %ymm3, %ymm14, %ymm3
 ; AVX1-ONLY-NEXT:    vandnps %ymm13, %ymm14, %ymm13
@@ -9401,43 +9413,43 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
 ; AVX1-ONLY-NEXT:    vandnps %ymm5, %ymm10, %ymm5
 ; AVX1-ONLY-NEXT:    vandps %ymm3, %ymm10, %ymm3
-; AVX1-ONLY-NEXT:    vorps %ymm5, %ymm3, %ymm3
-; AVX1-ONLY-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vorps %ymm5, %ymm3, %ymm1
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm3 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm3 = xmm6[0],mem[0],xmm6[1],mem[1],xmm6[2],mem[2],xmm6[3],mem[3]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,0,3,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $64, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = xmm5[0,1,2,3,4,5],mem[6],xmm5[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw $64, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = xmm1[0,1,2,3,4,5],mem[6],xmm1[7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,0,0,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,6,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0],xmm3[1,2],xmm5[3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm5[2,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,0,0,0]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5],xmm13[6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm6 = mem[0,1,2,3,6,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,2,2,2]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm13 = xmm13[0],mem[1],xmm13[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw $253, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm13 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm13 = mem[0],xmm1[1],mem[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,1,1,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,7,7,7,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm13[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm13 = xmm13[4],mem[4],xmm13[5],mem[5],xmm13[6],mem[6],xmm13[7],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm13 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm13 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,4,7,7]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm13 = xmm13[4],mem[4],xmm13[5],mem[5],xmm13[6],mem[6],xmm13[7],mem[7]
 ; AVX1-ONLY-NEXT:    vpshufb %xmm4, %xmm13, %xmm13
-; AVX1-ONLY-NEXT:    vmovdqa (%rsp), %xmm15 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm15, %xmm15
+; AVX1-ONLY-NEXT:    vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm1, %xmm15
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm15, %ymm13, %ymm13
 ; AVX1-ONLY-NEXT:    vandps %ymm6, %ymm14, %ymm6
 ; AVX1-ONLY-NEXT:    vandnps %ymm13, %ymm14, %ymm13
@@ -9445,30 +9457,31 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
 ; AVX1-ONLY-NEXT:    vandnps %ymm3, %ymm10, %ymm3
 ; AVX1-ONLY-NEXT:    vandps %ymm6, %ymm10, %ymm6
-; AVX1-ONLY-NEXT:    vorps %ymm3, %ymm6, %ymm3
-; AVX1-ONLY-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; AVX1-ONLY-NEXT:    vorps %ymm3, %ymm6, %ymm1
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,0,3,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $191, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm6 = mem[0,1,2,3,4,5],xmm6[6],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw $191, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm6 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm6 = mem[0,1,2,3,4,5],xmm0[6],mem[7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,0,0,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm3[1,2],xmm6[3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm11[4],xmm3[5],xmm11[5],xmm3[6],xmm11[6],xmm3[7],xmm11[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm11[4],xmm0[5],xmm11[5],xmm0[6],xmm11[6],xmm0[7],xmm11[7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm3[2,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,0,0,0]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5],xmm13[6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,2,2,2]
-; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm13 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm13 = xmm1[0],mem[1],xmm1[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw $253, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm13 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm13 = mem[0],xmm0[1],mem[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,1,1,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,7,7,7,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm13[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm13 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm13 = xmm12[4],xmm9[4],xmm12[5],xmm9[5],xmm12[6],xmm9[6],xmm12[7],xmm9[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,4,7,7]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
@@ -9485,9 +9498,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vandps %ymm2, %ymm10, %ymm2
 ; AVX1-ONLY-NEXT:    vorps %ymm6, %ymm2, %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,0,3,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
@@ -9511,16 +9524,16 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,1,1,0,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,7,7,7,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1],xmm13[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm13 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm13 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm13 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm13 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,4,7,7]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm13 = xmm13[4],mem[4],xmm13[5],mem[5],xmm13[6],mem[6],xmm13[7],mem[7]
 ; AVX1-ONLY-NEXT:    vpshufb %xmm4, %xmm13, %xmm4
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm1, %xmm13
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm0, %xmm13
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm13, %ymm4, %ymm4
 ; AVX1-ONLY-NEXT:    vandps %ymm14, %ymm8, %ymm8
 ; AVX1-ONLY-NEXT:    vandnps %ymm4, %ymm14, %ymm1
@@ -9537,20 +9550,20 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm15[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,0,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = xmm4[0],mem[0],xmm4[1],mem[1]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm9[0],xmm4[1],xmm9[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0],xmm1[1,2],xmm4[3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd $100, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm4 = mem[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,4,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm1[0,1,2,3,4,5],xmm4[6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm1, %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm0, %xmm1
 ; AVX1-ONLY-NEXT:    vpshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm6 = mem[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrlq $16, %xmm6, %xmm6
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrlq $16, %xmm0, %xmm6
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm6 = xmm1[0,1],xmm6[2,3],xmm1[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
@@ -9560,10 +9573,10 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    # xmm1 = xmm1[2],mem[2],xmm1[3],mem[3]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm7 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm7 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,u,u,u,u,0,1,4,5,8,9,6,7>
+; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm1 = [12,13,14,15,4,5,6,7,0,1,4,5,8,9,6,7]
 ; AVX1-ONLY-NEXT:    vpshufb %xmm1, %xmm7, %xmm7
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm8 = xmm9[2,3,2,3]
+; AVX1-ONLY-NEXT:    vpermilps $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm8 = mem[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm7, %ymm7
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
@@ -9575,11 +9588,11 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    # xmm4 = mem[1,1,1,1]
 ; AVX1-ONLY-NEXT:    vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm4 = xmm4[0,1],mem[2,3],xmm4[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm14[0,3,2,3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm12[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,1,0,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1,2],xmm6[3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,4,7]
@@ -9589,10 +9602,10 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm6 = mem[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrlq $16, %xmm6, %xmm6
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3]
+; AVX1-ONLY-NEXT:    vpsrlq $16, %xmm8, %xmm6
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3],xmm5[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm6 = mem[0,1,0,3]
@@ -9613,13 +9626,13 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm4 = mem[1,1,1,1]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm12[2,3],xmm4[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = xmm4[0,1],mem[2,3],xmm4[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd $236, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm5 = mem[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,0,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0],xmm4[1,2],xmm5[3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,7]
@@ -9658,8 +9671,8 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm6[0,3,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,0,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = xmm4[0],mem[0],xmm4[1],mem[1]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2],xmm4[3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,7]
@@ -9696,10 +9709,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
-; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm15 = [8,9,8,9,8,9,8,9,6,7,6,7,6,7,6,7]
-; AVX1-ONLY-NEXT:    vpshufb %xmm15, %xmm2, %xmm2
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3]
+; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm9 = [8,9,8,9,8,9,8,9,6,7,6,7,6,7,6,7]
+; AVX1-ONLY-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3,4],xmm1[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpblendw $191, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
@@ -9716,153 +9728,153 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1],xmm3[2],mem[2],xmm3[3],mem[3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,2,2]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm2[0,1],xmm3[2,3],xmm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpblendw $253, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = mem[0],xmm13[1],mem[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
-; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,7,7]
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
-; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,u,u,0,1,4,5,8,9,10,11>
-; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm4, %xmm4
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm9[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw $253, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm3 = mem[0],xmm13[1],mem[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
+; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,4,7,7]
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm3 = xmm3[4],mem[4],xmm3[5],mem[5],xmm3[6],mem[6],xmm3[7],mem[7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX1-ONLY-NEXT:    vandnps %ymm1, %ymm10, %ymm1
-; AVX1-ONLY-NEXT:    vandps %ymm3, %ymm10, %ymm3
-; AVX1-ONLY-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; AVX1-ONLY-NEXT:    vandps %ymm2, %ymm10, %ymm2
+; AVX1-ONLY-NEXT:    vorps %ymm1, %ymm2, %ymm1
 ; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm1, %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm15, %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm15, %xmm0
-; AVX1-ONLY-NEXT:    vpshufb %xmm15, %xmm3, %xmm3
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[3,4],xmm1[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm9[0,1,2,3,4,5],xmm14[6],xmm9[7]
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm11[0],xmm12[0],xmm11[1],xmm12[1],xmm11[2],xmm12[2],xmm11[3],xmm12[3]
+; AVX1-ONLY-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3,4],xmm1[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm12[0,1,2,3,4,5],xmm11[6],xmm12[7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
+; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,7,6]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrld $16, %xmm2, %xmm2
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
+; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,2,2]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0],xmm8[1],xmm0[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
-; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,7,6]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm3[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrld $16, %xmm3, %xmm3
+; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,4,7,7]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm3 = xmm3[4],mem[4],xmm3[5],mem[5],xmm3[6],mem[6],xmm3[7],mem[7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3]
-; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,2,2,2]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $253, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = mem[0],xmm8[1],mem[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
-; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,4,7,7]
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7]
-; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm4, %xmm4
-; AVX1-ONLY-NEXT:    vmovdqa (%rsp), %xmm5 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
+; AVX1-ONLY-NEXT:    vmovdqa (%rsp), %xmm4 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX1-ONLY-NEXT:    vandnps %ymm1, %ymm10, %ymm1
-; AVX1-ONLY-NEXT:    vandps %ymm3, %ymm10, %ymm3
-; AVX1-ONLY-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; AVX1-ONLY-NEXT:    vandps %ymm2, %ymm10, %ymm2
+; AVX1-ONLY-NEXT:    vorps %ymm1, %ymm2, %ymm1
 ; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm12, %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm1, %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = xmm14[0],mem[0],xmm14[1],mem[1],xmm14[2],mem[2],xmm14[3],mem[3]
+; AVX1-ONLY-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3,4],xmm1[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw $191, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = mem[0,1,2,3,4,5],xmm2[6],mem[7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
+; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,7,6]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrld $16, %xmm2, %xmm2
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm11[0],xmm15[0],xmm11[1],xmm15[1],xmm11[2],xmm15[2],xmm11[3],xmm15[3]
-; AVX1-ONLY-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, %xmm12
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[3,4],xmm1[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $191, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm3 = mem[0,1,2,3,4,5],xmm0[6],mem[7]
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
+; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,2,2,2]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm3 = xmm3[0],mem[1],xmm3[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
-; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,7,6]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm3[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrld $16, %xmm0, %xmm3
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3]
-; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,2,2,2]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,4,7,7]
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm3 = xmm3[4],mem[4],xmm3[5],mem[5],xmm3[6],mem[6],xmm3[7],mem[7]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = xmm4[0],mem[1],xmm4[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
-; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,4,7,7]
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7]
-; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm4, %xmm4
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm4[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
 ; AVX1-ONLY-NEXT:    vandnps %ymm1, %ymm10, %ymm1
-; AVX1-ONLY-NEXT:    vandps %ymm3, %ymm10, %ymm3
-; AVX1-ONLY-NEXT:    vorps %ymm1, %ymm3, %ymm1
+; AVX1-ONLY-NEXT:    vandps %ymm2, %ymm10, %ymm2
+; AVX1-ONLY-NEXT:    vorps %ymm1, %ymm2, %ymm1
 ; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; AVX1-ONLY-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; AVX1-ONLY-NEXT:    vpshufb %xmm12, %xmm3, %xmm3
+; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm1, %xmm3
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrlq $48, %xmm1, %xmm4
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3,4],xmm3[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3,4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw $191, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm3 = mem[0,1,2,3,4,5],xmm1[6],mem[7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
+; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,7,6]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $64, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = xmm1[0,1,2,3,4,5],mem[6],xmm1[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
-; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,7,6]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm4[5,6,7]
+; AVX1-ONLY-NEXT:    vpsrld $16, %xmm1, %xmm3
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm3 = xmm3[4],mem[4],xmm3[5],mem[5],xmm3[6],mem[6],xmm3[7],mem[7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrld $16, %xmm1, %xmm4
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
+; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,2,2,2]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw $253, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = mem[0],xmm4[1],mem[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
+; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,4,7,7]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm4 = xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
-; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,2,2,2]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $253, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = mem[0],xmm12[1],mem[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,0,3]
-; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,4,7,7]
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = xmm5[4],mem[4],xmm5[5],mem[5],xmm5[6],mem[6],xmm5[7],mem[7]
-; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,0,2]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm2, %ymm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX1-ONLY-NEXT:    vandnps %ymm3, %ymm10, %ymm3
-; AVX1-ONLY-NEXT:    vandps %ymm2, %ymm10, %ymm2
-; AVX1-ONLY-NEXT:    vorps %ymm3, %ymm2, %ymm2
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vandnps %ymm2, %ymm10, %ymm2
+; AVX1-ONLY-NEXT:    vandps %ymm3, %ymm10, %ymm3
+; AVX1-ONLY-NEXT:    vorps %ymm2, %ymm3, %ymm2
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vinsertps $41, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm3 = zero,xmm3[1],mem[0],zero
-; AVX1-ONLY-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = xmm13[2],mem[2],xmm13[3],mem[3]
+; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm4 = xmm13[2],xmm15[2],xmm13[3],xmm15[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3,4],xmm4[5,6,7]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm14[0],xmm9[1],xmm14[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm11[0],xmm12[1],xmm11[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,4,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm4[5,6,7]
@@ -9877,8 +9889,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3],xmm5[4,5,6,7]
-; AVX1-ONLY-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX1-ONLY-NEXT:    vpmovzxwd {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm8[0],xmm5[0],xmm8[1],xmm5[1],xmm8[2],xmm5[2],xmm8[3],xmm5[3]
 ; AVX1-ONLY-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm6 = mem[0,1,0,3]
@@ -9892,34 +9903,34 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vandnps %ymm3, %ymm10, %ymm3
 ; AVX1-ONLY-NEXT:    vandps %ymm4, %ymm10, %ymm4
 ; AVX1-ONLY-NEXT:    vorps %ymm3, %ymm4, %ymm3
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vinsertps $41, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = zero,xmm4[1],mem[0],zero
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = xmm5[2],mem[2],xmm5[3],mem[3]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vinsertps $41, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = zero,xmm0[1],mem[0],zero
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = xmm0[2],mem[2],xmm0[3],mem[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3,4],xmm5[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = xmm5[0],mem[1],xmm5[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = xmm0[0],mem[1],xmm0[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,4,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm5[5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm5 = mem[1,1,1,1]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm6 = xmm6[4],mem[4],xmm6[5],mem[5],xmm6[6],mem[6],xmm6[7],mem[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm6 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm6 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3],xmm6[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
 ; AVX1-ONLY-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm7 = mem[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,4,7]
@@ -9932,24 +9943,26 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vandnps %ymm4, %ymm10, %ymm4
 ; AVX1-ONLY-NEXT:    vandps %ymm5, %ymm10, %ymm5
 ; AVX1-ONLY-NEXT:    vorps %ymm4, %ymm5, %ymm4
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vinsertps {{.*#+}} xmm5 = zero,xmm5[1],xmm15[1],zero
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm6 = xmm6[2],mem[2],xmm6[3],mem[3]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vinsertps $41, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = zero,xmm0[1],mem[0],zero
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm6 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm6 = xmm0[2],mem[2],xmm0[3],mem[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3,4],xmm6[5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm6 = xmm6[0],mem[1],xmm6[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm6 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm6 = xmm0[0],mem[1],xmm0[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,4,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm6[5,6,7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm11[1,1,1,1]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm14[1,1,1,1]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm7 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm7 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,1,2,3]
 ; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,3,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3],xmm7[4,5,6,7]
@@ -9969,16 +9982,16 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vandnps %ymm5, %ymm10, %ymm5
 ; AVX1-ONLY-NEXT:    vandps %ymm6, %ymm10, %ymm6
 ; AVX1-ONLY-NEXT:    vorps %ymm5, %ymm6, %ymm5
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vinsertps $41, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm6 = zero,xmm6[1],mem[0],zero
-; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm7 = xmm7[2],mem[2],xmm7[3],mem[3]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vinsertps $41, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm6 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm6 = zero,xmm0[1],mem[0],zero
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpckhdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm7 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm7 = xmm0[2],mem[2],xmm0[3],mem[3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0,1,2],xmm6[3,4],xmm7[5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vpblendw $253, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm7 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm7 = mem[0],xmm0[1],mem[2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm7 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm7 = xmm0[0],mem[1],xmm0[2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,4,7]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm7[5,6,7]
@@ -9994,7 +10007,8 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0,1],xmm7[2,3],xmm8[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm8 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm12[0],xmm8[0],xmm12[1],xmm8[1],xmm12[2],xmm8[2],xmm12[3],xmm8[3]
+; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
 ; AVX1-ONLY-NEXT:    vpshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm9 = mem[0,1,0,3]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,4,7]
@@ -11408,7 +11422,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1],ymm9[2],ymm10[3,4],ymm9[5],ymm10[6,7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
@@ -11433,7 +11447,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm8[0,1],ymm7[2],ymm8[3,4],ymm7[5],ymm8[6,7]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm3[8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm3[8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm3
 ; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
@@ -11459,7 +11473,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
 ; AVX2-FAST-NEXT:    vpblendd $219, (%rsp), %ymm12, %ymm2 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm2 = mem[0,1],ymm12[2],mem[3,4],ymm12[5],mem[6,7]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpshufb %xmm5, %xmm2, %xmm3
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX2-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
@@ -11511,8 +11525,8 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm3[5,6,7],ymm5[8,9,10,11,12],ymm3[13,14,15]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1],ymm9[2,3],ymm10[4,5],ymm9[6,7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm3
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm3[8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm3[8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm14[0,1,2],ymm13[3],ymm14[4,5],ymm13[6],ymm14[7]
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <0,4,7,3,6,u,u,u>
@@ -11536,9 +11550,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0,1,2,3,4],ymm0[5,6,7],ymm5[8,9,10,11,12],ymm0[13,14,15]
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1],ymm7[2,3],ymm8[4,5],ymm7[6,7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm9
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpshufb %xmm8, %xmm9, %xmm9
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm5
 ; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3]
 ; AVX2-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
@@ -11588,8 +11602,8 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpblendd $51, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm1 = mem[0,1],ymm1[2,3],mem[4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; AVX2-FAST-NEXT:    vpblendd $183, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm2 = mem[0,1,2],ymm15[3],mem[4,5],ymm15[6],mem[7]
@@ -11615,7 +11629,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vpblendd $72, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-NEXT:    # ymm1 = ymm1[0,1,2],mem[3],ymm1[4,5],mem[6],ymm1[7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,u,u,u,10,11,6,7,u,u,u,u>
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [0,1,2,3,4,5,6,7,10,11,6,7,4,5,6,7]
 ; AVX2-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm3
 ; AVX2-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,4,6,7]
 ; AVX2-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
@@ -12220,7 +12234,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1],ymm8[2],ymm5[3,4],ymm8[5],ymm5[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm6 = xmm3[8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm6 = xmm3[8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm3, %xmm3
 ; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
@@ -12247,7 +12261,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu (%rsp), %ymm14 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm3 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm3 = ymm14[0,1],mem[2],ymm14[3,4],mem[5],ymm14[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm0 = <8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm0 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm0, %xmm3, %xmm4
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm3, %xmm3
 ; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
@@ -12309,7 +12323,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd $36, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm2 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm2 = ymm4[0,1],mem[2],ymm4[3,4],mem[5],ymm4[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm6 = <8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm6 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm2, %xmm3
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
@@ -12326,7 +12340,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm6, %xmm2, %xmm3
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %xmm6, %xmm12
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm2 = <10,11,6,7,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm2 = [10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,2,3,0,1,14,15,12,13,26,27,26,27,26,27,26,27,18,19,16,17,30,31,28,29>
@@ -12431,7 +12445,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd $204, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = ymm4[0,1],mem[2,3],ymm4[4,5],mem[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm1, %xmm6
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[8,9,4,5,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
 ; AVX2-FAST-PERLANE-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
@@ -12452,7 +12466,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd $183, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX2-FAST-PERLANE-NEXT:    # ymm1 = mem[0,1,2],ymm1[3],mem[4,5],ymm1[6],mem[7]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,u,u,10,11,6,7,u,u,u,u>
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,2,3,4,5,6,7,10,11,6,7,4,5,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
 ; AVX2-FAST-PERLANE-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,4,6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
@@ -13504,446 +13518,451 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    subq $1736, %rsp # imm = 0x6C8
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = <2,5,9,u,12,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm20, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = <2,5,9,u,12,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm19, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [8,1,12,5,12,5,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm4, %zmm3, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, %zmm25
 ; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm0, %zmm3, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 480(%rdi), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 480(%rdi), %ymm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 448(%rdi), %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1],ymm0[2],ymm6[3,4,5],ymm0[6],ymm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm6, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1],ymm5[2],ymm6[3,4,5],ymm5[6],ymm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm17
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4],xmm3[5],xmm5[6],xmm3[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,6,7,12,13,2,3,16,17,30,31,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vporq %ymm4, %ymm6, %ymm25
+; AVX512F-ONLY-FAST-NEXT:    vporq %ymm4, %ymm6, %ymm16
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 672(%rdi), %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,0,1,14,15,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm7, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm7, %xmm18
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm7, %xmm20
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 700(%rdi), %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm30 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm31
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm31[0,1,0,2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm18[0,1,0,2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1],ymm9[2],ymm6[3,4,5],ymm9[6],ymm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1],ymm12[2],ymm6[3,4,5],ymm12[6],ymm6[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm6, %ymm7
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 224(%rdi), %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm14, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 224(%rdi), %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm13, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 252(%rdi), %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 240(%rdi), %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,0,1,6,7,8,9,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2],ymm9[3],ymm7[4,5],ymm9[6],ymm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3,4,5],xmm1[6],xmm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 240(%rdi), %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,0,1,6,7,8,9,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2],ymm12[3],ymm7[4,5],ymm12[6],ymm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm7, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm1[1],xmm5[2,3,4,5],xmm1[6],xmm5[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <2,3,0,1,14,15,12,13,10,11,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm3, %ymm4, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 160(%rdi), %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm4, %ymm5, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 160(%rdi), %ymm10
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 128(%rdi), %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm11[0,1],ymm4[2],ymm11[3,4,5],ymm4[6],ymm11[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm3[0,1,2,3],xmm4[4],xmm3[5],xmm4[6],xmm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm13, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm27 = [0,1,2,15,0,1,2,15]
-; AVX512F-ONLY-FAST-NEXT:    # ymm27 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm6, %ymm27, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm12[0],xmm14[1],xmm12[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [2,3,0,1,14,15,14,15,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm8, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm15[0,1,2],ymm0[3],ymm15[4,5],ymm0[6],ymm15[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm15, %ymm16
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0],xmm7[1],xmm8[2,3,4,5],xmm7[6],xmm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm22, %zmm20, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm11[0,1],ymm10[2],ymm11[3,4,5],ymm10[6],ymm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4],xmm4[5],xmm6[6],xmm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm6, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm9, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm14[0],xmm13[1],xmm14[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [2,3,0,1,14,15,14,15,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm9, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm6, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm0[0,1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm17
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm9[0],xmm6[1],xmm9[2,3,4,5],xmm6[6],xmm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm22, %zmm19, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 608(%rdi), %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 576(%rdi), %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1],ymm10[2],ymm13[3,4,5],ymm10[6],ymm13[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm7[4],xmm2[5],xmm7[6],xmm2[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 640(%rdi), %ymm20
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm20[0,1,0,2]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm4, %ymm27, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 688(%rdi), %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm15[0],xmm0[1],xmm15[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm4, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <2,6,9,u,13,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm3, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm21, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm9[0],ymm2[1],ymm9[2,3],ymm2[4],ymm9[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm9, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 608(%rdi), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 576(%rdi), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm1[0,1],ymm0[2],ymm1[3,4,5],ymm0[6],ymm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm24
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1],xmm3[2,3,4,5],xmm6[6],xmm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm26, %zmm4, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,2,3,4,5,10,11,16,17,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm7, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <4,5,2,3,0,1,14,15,12,13,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm3, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm7, %ymm3, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm11[0,1,2],ymm1[3],ymm11[4,5],ymm1[6],ymm11[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm11, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm3[1],xmm7[2,3,4,5],xmm3[6],xmm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm1, %xmm19
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm6[4],xmm3[5],xmm6[6],xmm3[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 640(%rdi), %ymm26
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm26[0,1,0,2]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm0, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0,1,2,3,4,5,6],ymm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 688(%rdi), %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0],xmm6[1],xmm5[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <2,6,9,u,13,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0],ymm15[1],ymm12[2,3],ymm15[4],ymm12[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm7[1],xmm4[2,3,4,5],xmm7[6],xmm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm3, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,2,3,4,5,10,11,16,17,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <4,5,2,3,0,1,14,15,12,13,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm4, %ymm8, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm11[0,1,2],ymm10[3],ymm11[4,5],ymm10[6],ymm11[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm10, %ymm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm11, %ymm25
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm8[0],xmm4[1],xmm8[2,3,4,5],xmm4[6],xmm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm11 = [21474836482,21474836482,21474836482,21474836482]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm31, %ymm11, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm3, %ymm27, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm14[0],xmm12[0],xmm14[1],xmm12[1],xmm14[2],xmm12[2],xmm14[3],xmm12[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm12, %xmm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm3, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm9, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm18, %ymm11, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm4[0,1,2,3,4,5,6],ymm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm13[0],xmm14[0],xmm13[1],xmm14[1],xmm13[2],xmm14[2],xmm13[3],xmm14[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm14, %xmm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm4, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm12, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm13
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm17, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm16, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm14[0],ymm1[1],ymm14[2,3],ymm1[4],ymm14[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm12[1],xmm9[2,3,4,5],xmm12[6],xmm9[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm9, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm22, %zmm4, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm2[0],ymm13[1],ymm2[2,3],ymm13[4],ymm2[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm14[1],xmm12[2,3,4,5],xmm14[6],xmm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm12, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm22, %zmm3, %zmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm4, %ymm8, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm13[0,1,2],ymm10[3],ymm13[4,5],ymm10[6],ymm13[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm10, %ymm17
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1],xmm6[2,3,4,5],xmm4[6],xmm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm19, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm4, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm20, %ymm11, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm6, %ymm27, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm8, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm3, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm3, %ymm9, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm12[0,1,2],ymm2[3],ymm12[4,5],ymm2[6],ymm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm3[1],xmm7[2,3,4,5],xmm3[6],xmm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm26, %ymm11, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm28
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm17
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm0[0],ymm15[1],ymm0[2,3,4],ymm15[5],ymm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm8[0],xmm3[1],xmm8[2],xmm3[3],xmm8[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [10,3,6,15,12,13,6,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm20, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,4,5,10,11,0,1,22,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm9, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm3, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm3, %ymm9, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0],ymm11[1],ymm5[2,3],ymm11[4],ymm5[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm9[1],xmm3[2,3,4,5],xmm9[6],xmm3[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,4,5,2,3,0,1,14,15,12,13,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25>
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm18[0,1,1,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm3, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm15[0,1,2,3,4,5,6],ymm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [12,13,10,11,12,13,10,11,12,13,10,11,12,13,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm6, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0],ymm2[1],ymm5[2,3,4],ymm2[5],ymm5[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0],ymm13[1],ymm1[2,3,4],ymm13[5],ymm1[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1],xmm6[2],xmm4[3],xmm6[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [10,3,6,15,12,13,6,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm26, %zmm11, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,4,5,10,11,0,1,22,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm6, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm22, %zmm20, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm6, %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm6, %ymm4, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0],ymm5[1],ymm10[2,3],ymm5[4],ymm10[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1],xmm4[2,3,4,5],xmm6[6],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,4,5,2,3,0,1,14,15,12,13,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm4, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25>
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm31[0,1,1,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm12, %ymm27, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [12,13,10,11,12,13,10,11,12,13,10,11,12,13,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm3, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0],ymm1[1],ymm14[2,3,4],ymm1[5],ymm14[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1],xmm3[2],xmm0[3],xmm3[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm22, %zmm11, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm3, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm3, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [3,6,10,13,3,6,10,13]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm23, %zmm1, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5,6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm26[0,1,1,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm4, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm7, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0],ymm11[1],ymm5[2,3,4],ymm11[5],ymm5[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2],xmm5[3],xmm6[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm3, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5,6],ymm3[7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 232(%rdi), %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm19, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpsrlq $48, %xmm19, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm3, %zmm30
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [3,6,10,13,3,6,10,13]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm25, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm6, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5,6],ymm3[7]
 ; AVX512F-ONLY-FAST-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm30, %zmm0, %zmm25 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm13[1],ymm1[2,3],ymm13[4],ymm1[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2,3,4,5],xmm2[6],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm20[0,1,1,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm7, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm27, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm8, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0],ymm5[1],ymm10[2,3,4],ymm5[5],ymm10[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm5[0],xmm0[1],xmm5[2],xmm0[3],xmm5[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm27, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm13[1],ymm1[2,3,4],ymm13[5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm16 {%k1} # 16-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm7, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm1, %ymm27, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm2, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 232(%rdi), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm29, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpsrlq $48, %xmm29, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm5, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastw 680(%rdi), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm15, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpsrlq $48, %xmm15, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm28, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpsrlq $48, %xmm28, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm24
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [0,3,3,3,0,3,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm31, %ymm9, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [2,5,9,12,2,5,9,12]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm0, %zmm5, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,1,0,1,6,7,8,9,14,15,14,15,14,15,14,15,16,17,16,17,22,23,24,25,30,31,30,31,30,31,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm2, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2,3,4],ymm1[5,6,7],ymm2[8,9,10,11,12],ymm1[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm18, %ymm9, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [2,5,9,12,2,5,9,12]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm1, %zmm8, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,0,1,6,7,8,9,14,15,14,15,14,15,14,15,16,17,16,17,22,23,24,25,30,31,30,31,30,31,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm7, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm7[0,1,2,3,4],ymm3[5,6,7],ymm7[8,9,10,11,12],ymm3[13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpsrld $16, %xmm13, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm29, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm1, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 704(%rdi), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 736(%rdi), %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm2[2,3],ymm6[4,5],ymm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3],xmm2[4],xmm1[5],xmm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,10,11,8,9,6,7,4,5,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <0,u,u,u,4,7,11,14>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm6, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm12, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm12[0,1,2],xmm1[3,4,5,6],xmm12[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm12[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm11, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm0, %zmm14, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm12, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm12[0,1,2,3,4,5,6],ymm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm11 {%k1} # 16-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm20, %ymm9, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm23, %zmm5, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0,1,2,3,4],ymm3[5,6,7],ymm4[8,9,10,11,12],ymm3[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm16 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpsrld $16, %xmm16, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm10[4],xmm4[5],xmm10[5],xmm4[6],xmm10[6],xmm4[7],xmm10[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm10, %xmm19
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpsrld $16, %xmm13, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm7[4],xmm12[4],xmm7[5],xmm12[5],xmm7[6],xmm12[6],xmm7[7],xmm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm3, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 704(%rdi), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 736(%rdi), %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm7[0,1],ymm0[2,3],ymm7[4,5],ymm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm27
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0,1,2],xmm3[3],xmm7[4],xmm3[5],xmm7[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,10,11,8,9,6,7,4,5,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm0, %xmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <0,u,u,u,4,7,11,14>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm14, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0,1,2],xmm3[3,4,5,6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm1, %zmm25, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm5, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5,6],ymm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm3 {%k1} # 16-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [0,4,7,0,0,4,7,0]
-; AVX512F-ONLY-FAST-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm31, %ymm14, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm26, %ymm9, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm8, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm2, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7],ymm2[8,9,10,11,12],ymm0[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpsrld $16, %xmm17, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [0,4,7,0,0,4,7,0]
+; AVX512F-ONLY-FAST-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm18, %ymm10, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm23
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [2,6,9,13,2,6,9,13]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm0, %zmm11, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm1, %zmm11, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0,1,2,3,4],ymm3[5,6,7],ymm4[8,9,10,11,12],ymm3[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm2[5,6,7],ymm4[8,9,10,11,12],ymm2[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm18 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm0, %xmm22
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm9, %zmm5, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm3, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm3[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm3, %zmm8, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3,4,5],ymm4[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 288(%rdi), %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1],ymm3[2,3],ymm4[4,5],ymm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm15[0,1,2],xmm1[3],xmm15[4],xmm1[5],xmm15[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm6, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3,4,5,6],xmm2[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm2, %zmm5, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm5, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm5[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 288(%rdi), %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm12[0,1],ymm1[2,3],ymm12[4,5],ymm1[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0,1,2],xmm5[3],xmm7[4],xmm5[5],xmm7[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm16, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm5, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm14, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm15, %ymm14, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm14[0,1,2],xmm7[3,4,5,6],xmm14[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm14[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm14, %zmm8, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm8, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3,4,5],ymm8[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2],ymm3[3],ymm4[4,5],ymm3[6],ymm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm4, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm3, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0,1,2,3],xmm1[4],xmm5[5],xmm1[6],xmm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <1,u,u,u,4,8,11,15>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm6, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm7, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm7[0,1,2],xmm1[3,4,5,6],xmm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm2, %zmm11, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm7[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm7[0,1,2,3],xmm1[4],xmm7[5],xmm1[6],xmm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm6, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm6, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0,1,2],xmm1[3,4,5,6],xmm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm9, %zmm11, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm12[0,1,2],ymm1[3],ymm12[4,5],ymm1[6],ymm12[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm1, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0,1,2,3],xmm7[4],xmm8[5],xmm7[6],xmm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <1,u,u,u,4,8,11,15>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm15, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm0[0,1,2],xmm7[3,4,5,6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm14, %zmm11, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm7[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm4[3],ymm2[4,5],ymm4[6],ymm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0,1,2,3],xmm0[4],xmm7[5],xmm0[6],xmm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm15, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm7, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm6[0,1,2],xmm0[3,4,5,6],xmm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm3, %zmm11, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1],ymm12[2],ymm8[3,4,5],ymm12[6],ymm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1],ymm12[2],ymm5[3,4,5],ymm12[6],ymm5[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <1,u,u,u,5,8,12,15>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm5, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,30,31,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm6, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm6, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm2, %zmm26, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm2, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm2[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <1,u,u,u,5,8,12,15>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm6, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,30,31,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm7, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm7, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm14, %zmm25, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm7, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm7[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm3[2],ymm4[3,4,5],ymm3[6],ymm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm2[2],ymm4[3,4,5],ymm2[6],ymm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm7[4],xmm0[5],xmm7[6],xmm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm5, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm6, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm1, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm9, %zmm26, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm3, %zmm25, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm20, %ymm14, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm26, %ymm10, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm23, %zmm11, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm11, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm16, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm19, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm17, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm28, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm22, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 416(%rdi), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdi), %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1],ymm2[2],ymm15[3,4,5],ymm2[6],ymm15[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 416(%rdi), %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 384(%rdi), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm13[2],ymm2[3,4,5],ymm13[6],ymm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm7
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0,1,2],ymm12[3],ymm8[4,5],ymm12[6],ymm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2],ymm12[3],ymm5[4,5],ymm12[6],ymm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm15
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3,4,5],xmm1[6],xmm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,0,1,0,1,0,1,14,15,12,13,10,11,8,9]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [2,11,2,11,12,5,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm9, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,22,23,28,29,18,19,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm7, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm7[0,1,2],ymm0[3,4,5,6,7],ymm7[8,9,10],ymm0[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm9, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,22,23,28,29,18,19,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm8, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0,1,2],ymm0[3,4,5,6,7],ymm8[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm7, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm1, %ymm8, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 864(%rdi), %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 832(%rdi), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm5[2],ymm1[3,4,5],ymm5[6],ymm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm1, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 864(%rdi), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 832(%rdi), %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1],ymm1[2],ymm8[3,4,5],ymm1[6],ymm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm1, %ymm5
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm9, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2],ymm10[3],ymm4[4,5],ymm10[6],ymm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm9, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2],ymm14[3],ymm4[4,5],ymm14[6],ymm4[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm9
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm9[0],xmm2[1],xmm9[2,3,4,5],xmm2[6],xmm9[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -13957,34 +13976,34 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm0, %xmm25
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm26 = <0,3,7,10,14,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm26, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = <0,3,7,10,14,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm19, %zmm9
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm9[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm20, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm15[0,1,2],ymm14[3],ymm15[4,5],ymm14[6],ymm15[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm14, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm18, %zmm30
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2],ymm13[3],ymm7[4,5],ymm13[6],ymm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm21
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm9
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm9[0],xmm1[1],xmm9[2,3,4,5],xmm1[6],xmm9[7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm12[0],ymm8[1],ymm12[2,3],ymm8[4],ymm12[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm12, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm8, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm13[1],xmm9[2,3,4,5],xmm13[6],xmm9[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [2,3,2,3,2,3,2,3,0,1,14,15,12,13,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm12[0],ymm15[1],ymm12[2,3],ymm15[4],ymm12[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm12, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm15, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm14[1],xmm9[2,3,4,5],xmm14[6],xmm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [2,3,2,3,2,3,2,3,0,1,14,15,12,13,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = <2,u,u,u,6,9,13,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm23, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <2,u,u,u,6,9,13,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm15, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,128,128,128,128,128,128,128,128,128,128]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
@@ -13995,29 +14014,28 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm5, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm25, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm5, %zmm26, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm1, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm19, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm20, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2],ymm7[3],ymm6[4,5],ymm7[6],ymm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm6, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm18, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm8, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2],ymm5[3],ymm8[4,5],ymm5[6],ymm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm8
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5],xmm0[6],xmm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm23, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm15, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0],ymm6[1],ymm7[2,3],ymm6[4],ymm7[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm9
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm9[1],xmm2[2,3,4,5],xmm9[6],xmm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
@@ -14030,183 +14048,180 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm11, %ymm27
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm10, %ymm28
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm8, %xmm25
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm30
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = <0,4,7,11,14,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm23, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = <0,4,7,11,14,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm19, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, %zmm26
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29>
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm2, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm0[0,1],ymm2[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0],ymm15[1],ymm8[2,3],ymm15[4],ymm8[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm13[1],xmm0[2,3,4,5],xmm13[6],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm13 = ymm1[0],ymm14[1],ymm1[2,3,4],ymm14[5],ymm1[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm14, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm14[0],xmm13[1],xmm14[2],xmm13[3],xmm14[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [4,5,4,5,4,5,4,5,2,3,0,1,14,15,12,13]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0],ymm0[1],ymm13[2,3],ymm0[4],ymm13[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm13, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm24
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm14[1],xmm0[2,3,4,5],xmm14[6],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm13[0],ymm12[1],ymm13[2,3,4],ymm12[5],ymm13[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm15[0],xmm14[1],xmm15[2],xmm14[3],xmm15[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [4,5,4,5,4,5,4,5,2,3,0,1,14,15,12,13]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <3,u,u,u,6,10,13,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm10, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm10, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,18,19,20,21,26,27,128,128,128,128,128,128,128,128,128,128]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm13, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm14, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm31
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm3[2,3],ymm4[4,5],ymm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm4, %ymm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm21
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm25, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm29, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm30, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm16 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm5, %zmm23, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm17 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm19, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0],ymm12[1],ymm4[2,3],ymm12[4],ymm4[5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm12, %ymm25
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm22
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm11
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm11[1],xmm0[2,3,4,5],xmm11[6],xmm0[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm11 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm10, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm10, %zmm10
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm10, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm7[0],ymm6[1],ymm7[2,3,4],ymm6[5],ymm7[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm7, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm14[0],xmm10[1],xmm14[2],xmm10[3],xmm14[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm6[0],ymm5[1],ymm6[2,3,4],ymm5[5],ymm6[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm15[0],xmm10[1],xmm15[2],xmm10[3],xmm15[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm9[0,1,2],ymm0[3,4,5,6,7],ymm9[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm9, %ymm10, %ymm9
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm30
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm28, %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [0,1,2,3,4,5,6,7,10,11,6,7,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm10, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [0,1,2,3,4,5,6,7,10,11,6,7,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm10, %xmm10
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <1,4,8,11,15,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm19, %zmm10, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm13, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm13[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0],ymm15[1],ymm8[2,3,4],ymm15[5],ymm8[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1],xmm6[2],xmm5[3],xmm6[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [8,9,8,9,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm3, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm26, %zmm10, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm14, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm14[2,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm3[0],ymm5[1],ymm3[2,3,4],ymm5[5],ymm3[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2],xmm6[3],xmm7[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [8,9,8,9,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm3, %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,3,1,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,1,10,3,14,7,10,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm17, %zmm6, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [4,5,10,11,0,1,10,11,0,1,4,5,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm13, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm8, %ymm17
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm13[0],ymm3[1,2],ymm13[3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,2,3,12,13,6,7,4,5,2,3,0,1,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm5, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm3[0,1,2],ymm5[3,4,5,6,7],ymm3[8,9,10],ymm5[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm3[0,1,2,3],ymm5[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2],ymm8[3],ymm3[4,5],ymm8[6],ymm3[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [0,1,10,3,14,7,10,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm7, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [4,5,10,11,0,1,10,11,0,1,4,5,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm5, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0],ymm3[1,2],ymm5[3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,2,3,12,13,6,7,4,5,2,3,0,1,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm6, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm3[0,1,2],ymm6[3,4,5,6,7],ymm3[8,9,10],ymm6[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm3[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3],ymm4[4,5],ymm3[6],ymm4[7]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm9, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm9, %xmm9
 ; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,4,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm9 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm23, %zmm10, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm10, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm25, %zmm10, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm10, %ymm10
 ; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm10[2,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm4[0],ymm7[1],ymm4[2,3,4],ymm7[5],ymm4[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0],xmm7[1],xmm8[2],xmm7[3],xmm8[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm7, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm21, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm12[0,1],ymm4[2],ymm12[3,4],ymm4[5],ymm12[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm15, %xmm8, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0],ymm4[1],ymm8[2,3,4],ymm4[5],ymm8[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0],xmm8[1],xmm10[2],xmm8[3],xmm10[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm8, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm4[0,1],ymm8[2],ymm4[3,4],ymm8[5],ymm4[6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm8, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm8
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,3,1,3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm30, %zmm6, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm6, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm29, %zmm7, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm7, %ymm7
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm12 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm13 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm16 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm17 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm11 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm12, %zmm10, %zmm9
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm10, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0],ymm4[1,2],ymm6[3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm4[0,1,2],ymm6[3,4,5,6,7],ymm4[8,9,10],ymm6[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm7[0],ymm4[1,2],ymm7[3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm4[0,1,2],ymm5[3,4,5,6,7],ymm4[8,9,10],ymm5[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm12 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm12 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm7 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, (%rsp), %zmm0, %zmm10 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm13 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm14 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm15 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm31, %zmm0, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm29, %zmm0, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm31, %zmm0, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm6
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    movw $-512, %ax # imm = 0xFE00
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm6, %zmm12 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm6 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm7, %zmm6 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm5, %zmm12 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm5 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm7, %zmm5 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm7 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm7 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm10, %zmm7 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm10 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm10 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm13, %zmm10 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm14, %zmm13 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm15, %zmm14 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm1 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm1 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm19, %zmm1 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, (%rsi)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 64(%rsi)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 64(%rsi)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 64(%rdx)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, (%rdx)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 64(%rcx)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 64(%rcx)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, (%rcx)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 64(%r8)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, (%r8)
@@ -14214,15 +14229,15 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, (%r9)
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm2 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm17, %zmm2 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm2 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm16, %zmm2 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm3 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm3 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm3 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm0 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm5, %zmm0 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm0 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm6, %zmm0 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512F-ONLY-FAST-NEXT:    addq $1736, %rsp # imm = 0x6C8
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
@@ -15103,20 +15118,19 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512DQ-FAST-LABEL: load_i16_stride7_vf64:
 ; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $1288, %rsp # imm = 0x508
-; AVX512DQ-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <2,5,9,u,12,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm4, %zmm0, %zmm1
+; AVX512DQ-FAST-NEXT:    subq $1224, %rsp # imm = 0x4C8
+; AVX512DQ-FAST-NEXT:    vmovdqa64 512(%rdi), %zmm18
+; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = <2,5,9,u,12,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm17, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [8,1,12,5,12,5,14,15]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm4, %zmm3, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, %zmm25
-; AVX512DQ-FAST-NEXT:    vpermd %zmm5, %zmm3, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm18
+; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm3, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm19
+; AVX512DQ-FAST-NEXT:    vpermd %zmm18, %zmm3, %zmm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa 480(%rdi), %ymm15
 ; AVX512DQ-FAST-NEXT:    vmovdqa 448(%rdi), %ymm6
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1],ymm15[2],ymm6[3,4,5],ymm15[6],ymm6[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm6, %ymm13
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm22
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4],xmm3[5],xmm5[6],xmm3[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u>
@@ -15127,31 +15141,32 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovdqa 672(%rdi), %xmm7
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,0,1,14,15,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm7, %xmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm7, %xmm17
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm7, %xmm20
 ; AVX512DQ-FAST-NEXT:    vpbroadcastw 700(%rdi), %xmm7
-; AVX512DQ-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm27 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm28
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm28[0,1,0,2]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm23
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm23[0,1,0,2]
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm6
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm14
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1],ymm14[2],ymm6[3,4,5],ymm14[6],ymm6[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm13
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1],ymm13[2],ymm6[3,4,5],ymm13[6],ymm6[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm6, %ymm7
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm3
-; AVX512DQ-FAST-NEXT:    vporq %ymm2, %ymm3, %ymm24
-; AVX512DQ-FAST-NEXT:    vmovdqa 224(%rdi), %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, %xmm9
+; AVX512DQ-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 224(%rdi), %xmm12
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm12, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpbroadcastw 252(%rdi), %xmm3
-; AVX512DQ-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 240(%rdi), %xmm5
+; AVX512DQ-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm0 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 240(%rdi), %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,0,1,6,7,8,9,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2],ymm14[3],ymm7[4,5],ymm14[6],ymm7[7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0,1,2],ymm13[3],ymm7[4,5],ymm13[6],ymm7[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm7, %ymm11
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm1[1],xmm4[2,3,4,5],xmm1[6],xmm4[7]
@@ -15159,633 +15174,626 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm4
 ; AVX512DQ-FAST-NEXT:    vpor %ymm3, %ymm4, %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 160(%rdi), %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqa 160(%rdi), %ymm5
 ; AVX512DQ-FAST-NEXT:    vmovdqa 128(%rdi), %ymm10
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm10[0,1],ymm4[2],ymm10[3,4,5],ymm4[6],ymm10[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm19
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm10[0,1],ymm5[2],ymm10[3,4,5],ymm5[6],ymm10[7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm4
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm3[0,1,2,3],xmm4[4],xmm3[5],xmm4[6],xmm3[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm7
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,28,29>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm12, %ymm6
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm26 = [0,1,2,15,0,1,2,15]
-; AVX512DQ-FAST-NEXT:    # ymm26 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm6, %ymm26, %ymm7
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm5[0],xmm9[1],xmm5[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm5, %xmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm9, %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm9, %ymm7
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm6[0,1,2,3,4,5,6],ymm7[7]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm0[0],xmm12[1],xmm0[2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, %xmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [2,3,0,1,14,15,14,15,8,9,10,11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm8, %xmm8
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm13[0,1,2],ymm15[3],ymm13[4,5],ymm15[6],ymm13[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm13, %ymm23
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm0[0,1,2],ymm15[3],ymm0[4,5],ymm15[6],ymm0[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm27
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm8
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0],xmm7[1],xmm8[2,3,4,5],xmm7[6],xmm8[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm1
-; AVX512DQ-FAST-NEXT:    vpermd %zmm18, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, %zmm30
+; AVX512DQ-FAST-NEXT:    vpermd %zmm18, %zmm17, %zmm0
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 608(%rdi), %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa 576(%rdi), %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm1[0,1],ymm0[2],ymm1[3,4,5],ymm0[6],ymm1[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm22
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm21
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm21
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm30
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm7
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm7[4],xmm2[5],xmm7[6],xmm2[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 640(%rdi), %ymm18
+; AVX512DQ-FAST-NEXT:    vmovdqa64 640(%rdi), %ymm26
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm3
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm18[0,1,0,2]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm26[0,1,0,2]
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm20
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm4, %ymm26, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa 688(%rdi), %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm17, %xmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm0[0],xmm13[1],xmm0[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm0, %xmm17
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm4, %xmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <2,6,9,u,13,u,u,u>
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm3, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm14[0],ymm11[1],ymm14[2,3],ymm11[4],ymm14[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm25
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0,1,2,3,4,5,6],ymm4[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 688(%rdi), %xmm7
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, %xmm14
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm14[1],xmm7[2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm7, %xmm20
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = <2,6,9,u,13,u,u,u>
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm13[0],ymm11[1],ymm13[2,3],ymm11[4],ymm13[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm13, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm6
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1],xmm3[2,3,4,5],xmm6[6],xmm3[7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermd %zmm25, %zmm4, %zmm7
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1],xmm4[2,3,4,5],xmm6[6],xmm4[7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm17, %zmm7
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,2,3,4,5,10,11,16,17,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm7, %ymm7
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <4,5,2,3,0,1,14,15,12,13,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm3, %ymm3
-; AVX512DQ-FAST-NEXT:    vpor %ymm7, %ymm3, %ymm2
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm4, %ymm4
+; AVX512DQ-FAST-NEXT:    vpor %ymm7, %ymm4, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm9
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm10[0,1,2],ymm9[3],ymm10[4,5],ymm9[6],ymm10[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm10, %ymm19
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm7
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm3[1],xmm7[2,3,4,5],xmm3[6],xmm7[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm3, %xmm10
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm11 = [21474836482,21474836482,21474836482,21474836482]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm28, %ymm11, %ymm3
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm3, %ymm26, %ymm10
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm12, %xmm29
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm3, %xmm12
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm10, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm14
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm23, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm15[0],ymm14[1],ymm15[2,3],ymm14[4],ymm15[5,6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm12
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm12[1],xmm10[2,3,4,5],xmm12[6],xmm10[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm10, %ymm8
-; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm4, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm4, %ymm4
-; AVX512DQ-FAST-NEXT:    vpor %ymm4, %ymm8, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm8
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1,2],ymm12[3],ymm8[4,5],ymm12[6],ymm8[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1],xmm6[2,3,4,5],xmm4[6],xmm6[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm4, %xmm4
-; AVX512DQ-FAST-NEXT:    vpermd %ymm18, %ymm11, %ymm6
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm6, %ymm26, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm17, %xmm5
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm5, %xmm6
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0,1,2],ymm5[3],ymm10[4,5],ymm5[6],ymm10[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm28
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm10, %ymm24
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm7
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2,3,4,5],xmm4[6],xmm7[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm2, %xmm29
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm10 = [21474836482,21474836482,21474836482,21474836482]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm23, %ymm10, %ymm11
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm4[0,1,2,3,4,5,6],ymm11[7]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm12[0],xmm9[0],xmm12[1],xmm9[1],xmm12[2],xmm9[2],xmm12[3],xmm9[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm9, %xmm22
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm4, %xmm12
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1],xmm6[2],xmm4[3],xmm6[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [10,3,6,15,12,13,6,15]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm25, %zmm2, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm27, %ymm13
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm27, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm15, (%rsp) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm15[0],ymm13[1],ymm15[2,3],ymm13[4],ymm15[5,6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm12
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm12[1],xmm11[2,3,4,5],xmm12[6],xmm11[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm11, %ymm8
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermd %zmm18, %zmm17, %zmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm3, %ymm3
+; AVX512DQ-FAST-NEXT:    vpor %ymm3, %ymm8, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2],ymm2[3],ymm5[4,5],ymm2[6],ymm5[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm6
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm6[0],xmm3[1],xmm6[2,3,4,5],xmm3[6],xmm6[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm29, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-FAST-NEXT:    vpermd %ymm26, %ymm10, %ymm6
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm6[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, %xmm6
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm14[0],xmm6[0],xmm14[1],xmm6[1],xmm14[2],xmm6[2],xmm14[3],xmm6[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm14, %xmm30
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm6, %xmm7
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm3, %zmm27
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm1[0],ymm0[1],ymm1[2,3,4],ymm0[5],ymm1[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm7
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm7[0],xmm3[1],xmm7[2],xmm3[3],xmm7[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [10,3,6,15,12,13,6,15]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm17, %zmm8
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,4,5,10,11,0,1,22,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm6, %ymm6
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm4, %ymm4
-; AVX512DQ-FAST-NEXT:    vpor %ymm6, %ymm4, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm8
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm3, %ymm3
+; AVX512DQ-FAST-NEXT:    vpor %ymm3, %ymm8, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm9[0],ymm1[1],ymm9[2,3],ymm1[4],ymm9[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm9, %ymm19
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1],xmm4[2,3,4,5],xmm6[6],xmm4[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,4,5,2,3,0,1,14,15,12,13,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm4, %xmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25>
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm28[0,1,1,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm11
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm11, %ymm26, %ymm13
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [12,13,10,11,12,13,10,11,12,13,10,11,12,13,10,11]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm3
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm13, %zmm21
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm15[0],ymm14[1],ymm15[2,3,4],ymm14[5],ymm15[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm13
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm13[0],xmm3[1],xmm13[2],xmm3[3],xmm13[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm3, %ymm3
-; AVX512DQ-FAST-NEXT:    vpermd %zmm30, %zmm2, %zmm10
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm10, %ymm7
-; AVX512DQ-FAST-NEXT:    vpor %ymm7, %ymm3, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm9
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm28, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm1[0],ymm9[1],ymm1[2,3],ymm9[4],ymm1[5,6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm8
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm8[1],xmm3[2,3,4,5],xmm8[6],xmm3[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,4,5,2,3,0,1,14,15,12,13,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm12
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,24,25>
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm23[0,1,1,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm3, %ymm14
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm12[0,1,2,3,4,5,6],ymm14[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [12,13,10,11,12,13,10,11,12,13,10,11,12,13,10,11]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm14, %zmm21
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm15[0],ymm13[1],ymm15[2,3,4],ymm13[5],ymm15[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm14
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm14[0],xmm4[1],xmm14[2],xmm4[3],xmm14[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm4, %ymm4
+; AVX512DQ-FAST-NEXT:    vpermd %zmm18, %zmm17, %zmm11
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm11, %ymm7
+; AVX512DQ-FAST-NEXT:    vpor %ymm7, %ymm4, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [3,6,10,13,3,6,10,13]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm23, %zmm0, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm20
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3,4,5,6],ymm2[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm11
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0],ymm5[1],ymm2[2,3],ymm5[4],ymm2[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm5, %ymm0
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm7
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm7[1],xmm4[2,3,4,5],xmm7[6],xmm4[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm7
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm26[0,1,1,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm4, %ymm8
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,6],ymm8[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm6, %xmm6
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm31
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0],ymm9[1],ymm1[2,3,4],ymm9[5],ymm1[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm5[1],xmm6[2],xmm5[3],xmm6[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm6, %xmm6
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm3, %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5,6],ymm3[7]
+; AVX512DQ-FAST-NEXT:    vpbroadcastw 232(%rdi), %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm22, %xmm8
+; AVX512DQ-FAST-NEXT:    vpsrlq $48, %xmm22, %xmm7
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm3, %zmm24
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm1, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 576(%rdi), %zmm19
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [3,6,10,13,3,6,10,13]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm1, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, %zmm13
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm6
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3,4,5,6],ymm3[7]
 ; AVX512DQ-FAST-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm27, %zmm2, %zmm16 {%k1}
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm16 {%k1} # 16-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm12[0],ymm8[1],ymm12[2,3],ymm8[4],ymm12[5,6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm7
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm7[1],xmm2[2,3,4,5],xmm7[6],xmm2[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm18[0,1,1,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm6, %ymm7
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm7, %ymm26, %ymm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm5, %xmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm5
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0],ymm1[1],ymm5[2,3,4],ymm1[5],ymm5[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm7
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm7[0],xmm5[1],xmm7[2],xmm5[3],xmm7[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm4
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm4, %ymm26, %ymm5
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0],ymm8[1],ymm12[2,3,4],ymm8[5],ymm12[6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0],ymm0[1],ymm11[2,3,4],ymm0[5],ymm11[6,7]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm6, %ymm1
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm26, %ymm0
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm2, %zmm25
-; AVX512DQ-FAST-NEXT:    vpbroadcastw 232(%rdi), %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm29, %xmm6
-; AVX512DQ-FAST-NEXT:    vpsrlq $48, %xmm29, %xmm2
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm5, %zmm27
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm4, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastw 680(%rdi), %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm17, %xmm12
-; AVX512DQ-FAST-NEXT:    vpsrlq $48, %xmm17, %xmm2
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm31
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,3,3,3,0,3,7,7]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm28, %ymm8, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, %xmm9
+; AVX512DQ-FAST-NEXT:    vpsrlq $48, %xmm20, %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm29
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,3,3,3,0,3,7,7]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm23, %ymm4, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdi), %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [2,5,9,12,2,5,9,12]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm14, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [2,5,9,12,2,5,9,12]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm6, %zmm5
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [0,1,0,1,6,7,8,9,14,15,14,15,14,15,14,15,16,17,16,17,22,23,24,25,30,31,30,31,30,31,30,31]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm4, %ymm4
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7],ymm4[8,9,10,11,12],ymm1[13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm5, %ymm5
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm5[0,1,2,3,4],ymm1[5,6,7],ymm5[8,9,10,11,12],ymm1[13,14,15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpsrld $16, %xmm11, %xmm4
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm1, %zmm1
+; AVX512DQ-FAST-NEXT:    vpsrld $16, %xmm11, %xmm5
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm1, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 704(%rdi), %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa 736(%rdi), %ymm5
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm19
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm22
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3],xmm4[4],xmm1[5],xmm4[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,10,11,8,9,6,7,4,5,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <0,u,u,u,4,7,11,14>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm13, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm10, %ymm10
+; AVX512DQ-FAST-NEXT:    vmovdqa 704(%rdi), %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa 736(%rdi), %ymm10
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm10[0,1],ymm5[2,3],ymm10[4,5],ymm5[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm10, %ymm18
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm20
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm5
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm5[0,1,2],xmm1[3],xmm5[4],xmm1[5],xmm5[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,u,u,u,10,11,8,9,6,7,4,5,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 768(%rdi), %zmm28
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <0,u,u,u,4,7,11,14>
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm16, %zmm10
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm10[0,1,2],xmm1[3,4,5,6],xmm10[7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0,1,2,3],ymm10[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm1[0,1,2,3],ymm10[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm3
-; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm15, %zmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm10, %ymm10
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm10[0,1,2,3,4,5,6],ymm3[7]
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm24 {%k1} # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermd %ymm18, %ymm8, %ymm3
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
-; AVX512DQ-FAST-NEXT:    vpermd %zmm23, %zmm14, %zmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm1, %ymm10
+; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm13, %zmm12
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm12, %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm10[7]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm1 {%k1} # 16-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermd %ymm26, %ymm4, %ymm2
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm2, %ymm2
+; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm6, %zmm3
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm3, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5,6,7],ymm3[8,9,10,11,12],ymm2[13,14,15]
-; AVX512DQ-FAST-NEXT:    vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm12[4],xmm3[5],xmm12[5],xmm3[6],xmm12[6],xmm3[7],xmm12[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm17, %xmm20
+; AVX512DQ-FAST-NEXT:    vpsrld $16, %xmm30, %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm9, %xmm22
 ; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm3, %zmm2, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [0,4,7,0,0,4,7,0]
-; AVX512DQ-FAST-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm28, %ymm10, %ymm2
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [0,4,7,0,0,4,7,0]
+; AVX512DQ-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm23, %ymm12, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [2,6,9,13,2,6,9,13]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm17, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [2,6,9,13,2,6,9,13]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm10, %zmm0
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7],ymm0[8,9,10,11,12],ymm2[13,14,15]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm11[4],xmm6[4],xmm11[5],xmm6[5],xmm11[6],xmm6[6],xmm11[7],xmm6[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm11[4],xmm8[4],xmm11[5],xmm8[5],xmm11[6],xmm8[6],xmm11[7],xmm8[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm2, %xmm2
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm3
-; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm14, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 832(%rdi), %zmm0
+; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm6, %zmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,24,25,30,31>
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm2, %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm2[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 288(%rdi), %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm15
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm15[0,1,2],xmm4[3],xmm15[4],xmm4[5],xmm15[6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm15
-; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm16
-; AVX512DQ-FAST-NEXT:    vpermd %zmm16, %zmm13, %zmm13
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm13, %ymm5
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm5[0,1,2],xmm15[3,4,5,6],xmm5[7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm13
-; AVX512DQ-FAST-NEXT:    vpermd %zmm13, %zmm14, %zmm14
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm14, %ymm14
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm14[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm11
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm1, %ymm9
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm14
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm14[0,1,2,3],xmm5[4],xmm14[5],xmm5[6],xmm14[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm5, %xmm5
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm2[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa 288(%rdi), %ymm13
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0,1],ymm2[2,3],ymm13[4,5],ymm2[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm15
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm15[0,1,2],xmm5[3],xmm15[4],xmm5[5],xmm15[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm5, %xmm15
+; AVX512DQ-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm17
+; AVX512DQ-FAST-NEXT:    vpermd %zmm17, %zmm16, %zmm14
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[0,1,6,7,8,9,14,15,8,9,14,15,4,5,2,3,16,17,22,23,24,25,30,31,24,25,30,31,20,21,18,19]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm15 = xmm14[0,1,2],xmm15[3,4,5,6],xmm14[7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdi), %zmm16
+; AVX512DQ-FAST-NEXT:    vpermd %zmm16, %zmm6, %zmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm6, %ymm6
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm14[0,1,2,3,4,5],ymm6[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm13[0,1,2],ymm2[3],ymm13[4,5],ymm2[6],ymm13[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm5
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm14
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm14[0,1,2,3],xmm6[4],xmm14[5],xmm6[6],xmm14[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,u,u,u,12,13,10,11,8,9,6,7,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm6, %xmm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <1,u,u,u,4,8,11,15>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm16, %zmm7, %zmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm14, %ymm14
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm14[0,1,2],xmm5[3,4,5,6],xmm14[7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm13, %zmm17, %zmm14
+; AVX512DQ-FAST-NEXT:    vpermd %zmm17, %zmm7, %zmm15
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [2,3,4,5,10,11,12,13,0,1,0,1,0,1,0,1,18,19,20,21,26,27,28,29,16,17,16,17,16,17,16,17]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm15, %ymm15
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm15[0,1,2],xmm6[3,4,5,6],xmm15[7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm15[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm16, %zmm10, %zmm15
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,26,27,28,29>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm14, %ymm14
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm14[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm0[0,1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm14
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm14[0,1,2,3],xmm5[4],xmm14[5],xmm5[6],xmm14[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm7, %zmm7
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm7, %ymm6
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2],xmm5[3,4,5,6],xmm6[7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm17, %zmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm15, %ymm15
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5],ymm15[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm3[0,1,2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm15
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm15[0,1,2,3],xmm6[4],xmm15[5],xmm6[6],xmm15[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm6, %xmm6
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm7, %zmm7
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm7, %ymm4
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm4[0,1,2],xmm6[3,4,5,6],xmm4[7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm10, %zmm6
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm1[6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0,1],ymm11[2],ymm9[3,4,5],ymm11[6],ymm9[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm5
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm5[4],xmm1[5],xmm5[6],xmm1[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1],ymm13[2],ymm5[3,4,5],ymm13[6],ymm5[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm5, %ymm11
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4],xmm1[5],xmm4[6],xmm1[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <0,1,2,3,0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <1,u,u,u,5,8,12,15>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm16, %zmm7, %zmm5
+; AVX512DQ-FAST-NEXT:    vpermd %zmm17, %zmm7, %zmm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,30,31,128,128,128,128,128,128,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm5, %ymm5
-; AVX512DQ-FAST-NEXT:    vpor %ymm5, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [3,6,10,13,3,6,10,13]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm13, %zmm15, %zmm5
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm5, %ymm5
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm5[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm4 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    movw $-512, %ax # imm = 0xFE00
-; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm4 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm0[2],ymm2[3,4,5],ymm0[6],ymm2[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm5
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm13
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm13[4],xmm1[5],xmm13[6],xmm1[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm7, %zmm6
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm6, %ymm6
 ; AVX512DQ-FAST-NEXT:    vpor %ymm6, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm15, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [3,6,10,13,3,6,10,13]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm16, %zmm5, %zmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm6, %ymm6
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm6[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm6 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    movw $-512, %ax # imm = 0xFE00
+; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm6 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm3[2],ymm2[3,4,5],ymm3[6],ymm2[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm6
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm15
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm15[4],xmm1[5],xmm15[6],xmm1[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm7, %zmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm4, %ymm4
+; AVX512DQ-FAST-NEXT:    vpor %ymm4, %ymm1, %ymm1
+; AVX512DQ-FAST-NEXT:    vpermd %zmm0, %zmm5, %zmm0
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm1 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermd %ymm18, %ymm10, %ymm0
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm27 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm27 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpermd %ymm26, %ymm12, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm23, %zmm17, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm10, %zmm1
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2,3,4],ymm0[5,6,7],ymm1[8,9,10,11,12],ymm0[13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, %xmm2
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm30, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm22, %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 416(%rdi), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 384(%rdi), %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm1[2],ymm2[3,4,5],ymm1[6],ymm2[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm12
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm1, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa 384(%rdi), %ymm5
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1],ymm1[2],ymm5[3,4,5],ymm1[6],ymm5[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm1, %ymm4
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm9, %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0,1,2],ymm11[3],ymm9[4,5],ymm11[6],ymm9[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm6
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0],xmm1[1],xmm6[2,3,4,5],xmm1[6],xmm6[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,0,1,0,1,0,1,14,15,12,13,10,11,8,9]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm15
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm11[0,1,2],ymm13[3],ymm11[4,5],ymm13[6],ymm11[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm8
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm8[0],xmm1[1],xmm8[2,3,4,5],xmm1[6],xmm8[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,0,1,0,1,0,1,14,15,12,13,10,11,8,9]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [2,11,2,11,12,5,8,9]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm16, %zmm10, %zmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [2,11,2,11,12,5,8,9]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm17, %zmm12, %zmm10
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,22,23,28,29,18,19,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm8, %ymm8
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0,1,2],ymm0[3,4,5,6,7],ymm8[8,9,10],ymm0[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm10, %ymm10
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3,4,5,6,7],ymm10[8,9,10],ymm0[11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
-; AVX512DQ-FAST-NEXT:    vpor %ymm1, %ymm8, %ymm1
+; AVX512DQ-FAST-NEXT:    vpor %ymm1, %ymm10, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm21 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm21 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm21 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 864(%rdi), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 832(%rdi), %ymm4
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1],ymm1[2],ymm4[3,4,5],ymm1[6],ymm4[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm8
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm1, %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqa 832(%rdi), %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1],ymm1[2],ymm3[3,4,5],ymm1[6],ymm3[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm3, %ymm7
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm1, %ymm3
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm10, %zmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm12, %zmm1
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm7
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2],ymm5[3],ymm7[4,5],ymm5[6],ymm7[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm20
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm10
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm10[0],xmm6[1],xmm10[2,3,4,5],xmm6[6],xmm10[7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1,2],ymm6[3],ymm2[4,5],ymm6[6],ymm2[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm10
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm19
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm12
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm12[0],xmm8[1],xmm12[2,3,4,5],xmm8[6],xmm12[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
-; AVX512DQ-FAST-NEXT:    vpor %ymm1, %ymm6, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[0,1,2,3,2,3,0,1,14,15,12,13,10,11],zero,zero
+; AVX512DQ-FAST-NEXT:    vpor %ymm1, %ymm8, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm25 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm25 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm31 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm31 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0,1],ymm12[2],ymm9[3,4],ymm12[5],ymm9[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [8,9,4,5,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm8
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm0, %xmm18
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[3,1,2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <0,3,7,10,14,u,u,u>
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm6, %zmm14
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <0,3,7,10,14,u,u,u>
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermd %zmm22, %zmm8, %zmm14
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm14, %ymm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm20
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm14[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm28, %zmm27
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0,1,2],ymm3[3],ymm12[4,5],ymm3[6],ymm12[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm12, %ymm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm21
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, %zmm31
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm25, %zmm31
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm11
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm14
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm14[0],xmm1[1],xmm14[2,3,4,5],xmm1[6],xmm14[7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm11[0],ymm2[1],ymm11[2,3],ymm2[4],ymm11[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm11, %ymm25
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm22
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm13[0],ymm15[1],ymm13[2,3],ymm15[4],ymm13[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm15, %ymm21
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm15
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1],xmm14[2,3,4,5],xmm15[6],xmm14[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [2,3,2,3,2,3,2,3,0,1,14,15,12,13,10,11]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm1, %xmm1
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = <2,u,u,u,6,9,13,u>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm16, %zmm17, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <2,u,u,u,6,9,13,u>
+; AVX512DQ-FAST-NEXT:    vpermd %zmm17, %zmm16, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0,1,2],ymm1[3,4,5,6,7],ymm0[8,9,10],ymm1[11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
 ; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm14, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm27 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1],ymm2[2],ymm12[3,4],ymm2[5],ymm12[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm18, %xmm11
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm1
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm31 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqu (%rsp), %ymm2 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm18, %xmm14
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm1
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm6, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm6
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm8, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm8
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm28, %zmm31
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2],ymm4[3],ymm8[4,5],ymm4[6],ymm8[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm8, %ymm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm23
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm25, %zmm29
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2],ymm3[3],ymm7[4,5],ymm3[6],ymm7[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm18
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm3, %ymm7
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5],xmm0[6],xmm1[7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm17, %zmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm7, %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm7
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm7[0],ymm4[1],ymm7[2,3],ymm4[4],ymm7[5,6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm6[1],xmm5[2,3,4,5],xmm6[6],xmm5[7]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm16, %zmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0],ymm3[1],ymm10[2,3],ymm3[4],ymm10[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm10, %ymm30
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm8
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2,3,4,5],xmm8[6],xmm6[7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3,4,5,6,7],ymm1[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
-; AVX512DQ-FAST-NEXT:    vpor %ymm1, %ymm5, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,2,3,0,1,14,15,12,13],zero,zero
+; AVX512DQ-FAST-NEXT:    vpor %ymm1, %ymm6, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm31 {%k1}
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm10[2,3],ymm9[4,5],ymm10[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm10, %ymm24
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm9, %ymm14
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm29 {%k1}
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1],ymm12[2,3],ymm9[4,5],ymm12[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm12, %ymm23
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm9, %ymm24
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm11, %xmm29
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm27
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm14, %xmm27
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [10,11,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <0,4,7,11,14,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm9, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm30
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = <0,4,7,11,14,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpermd %zmm22, %zmm19, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm8
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0],ymm13[1],ymm8[2,3],ymm13[4],ymm8[5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm13, %ymm21
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0],ymm5[1],ymm11[2,3],ymm5[4],ymm11[5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm11, %ymm20
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm15
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm15[1],xmm0[2,3,4,5],xmm15[6],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm11
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm13
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm13[0],ymm11[1],ymm13[2,3,4],ymm11[5],ymm13[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm10
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm15[1],xmm10[2],xmm15[3],xmm10[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,5,4,5,4,5,4,5,2,3,0,1,14,15,12,13]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm10
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm13[0],ymm10[1],ymm13[2,3,4],ymm10[5],ymm13[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm12
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm15[1],xmm12[2],xmm15[3],xmm12[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [4,5,4,5,4,5,4,5,2,3,0,1,14,15,12,13]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = <3,u,u,u,6,10,13,u>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm16, %zmm17, %zmm15
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,18,19,20,21,26,27,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm15, %ymm15
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm15[0,1,2],ymm0[3,4,5,6,7],ymm15[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
-; AVX512DQ-FAST-NEXT:    vpor %ymm15, %ymm10, %ymm10
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm1 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <3,u,u,u,6,10,13,u>
+; AVX512DQ-FAST-NEXT:    vpermd %zmm17, %zmm16, %zmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,1,18,19,20,21,26,27,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm8, %ymm8
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0,1,2],ymm0[3,4,5,6,7],ymm8[8,9,10],ymm0[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
+; AVX512DQ-FAST-NEXT:    vpor %ymm8, %ymm12, %ymm8
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm1 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1],ymm2[2,3],ymm12[4,5],ymm2[6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm12, %ymm22
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm12
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm29, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm10, %xmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm12
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm4[2,3],ymm2[4,5],ymm4[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm21
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm8
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm27, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm9, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm9, %ymm9
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm15 = ymm0[0,1],ymm9[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm10
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm10[1],ymm3[2,3],ymm10[4],ymm3[5,6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm9
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm9[1],xmm0[2,3,4,5],xmm9[6],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm17, %zmm6
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm6, %ymm5
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0],ymm4[1],ymm7[2,3,4],ymm4[5],ymm7[6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm8, %xmm8
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm19, %zmm8
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm8, %ymm8
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm0[0,1],ymm8[2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm11
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0],ymm11[1],ymm7[2,3],ymm11[4],ymm7[5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm17
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm9
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm9[0],xmm6[1],xmm9[2],xmm6[3],xmm9[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm8
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm8[1],xmm0[2,3,4,5],xmm8[6],xmm0[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm16, %zmm8
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm8, %ymm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm15
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm15[0],ymm3[1],ymm15[2,3,4],ymm3[5],ymm15[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm16
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2],xmm8[3],xmm9[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0,1,2],ymm0[3,4,5,6,7],ymm5[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
-; AVX512DQ-FAST-NEXT:    vpor %ymm5, %ymm6, %ymm5
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm15 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm15 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm0
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1,2],ymm0[3],ymm14[4,5],ymm0[6],ymm14[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,2,3,4,5,6,7,10,11,6,7,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm5, %xmm5
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3,4,5,6,7],ymm6[8,9,10],ymm0[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[0,1,2,3,6,7,4,5,2,3,0,1,14,15],zero,zero
+; AVX512DQ-FAST-NEXT:    vpor %ymm6, %ymm8, %ymm6
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm14 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm14 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3],ymm2[4,5],ymm0[6],ymm2[7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,2,3,4,5,6,7,10,11,6,7,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm6, %xmm6
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,4,6,7]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <1,4,8,11,15,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm20, %zmm5, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm9, %ymm9
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <1,4,8,11,15,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpermd %zmm22, %zmm6, %zmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm9, %ymm9
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm9[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm2
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0],ymm2[1],ymm8[2,3,4],ymm2[5],ymm8[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm9
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm7 = xmm9[0],xmm7[1],xmm9[2],xmm7[3],xmm9[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1],ymm11[2],ymm13[3,4],ymm11[5],ymm13[6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm2
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm2[0],ymm5[1],ymm2[2,3,4],ymm5[5],ymm2[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm7
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2],xmm4[3],xmm7[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm13[0,1],ymm10[2],ymm13[3,4],ymm10[5],ymm13[6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [8,9,8,9,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm2, %xmm9
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm2, %xmm7
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,3,1,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm2[0],xmm9[1],xmm2[1],xmm9[2],xmm2[2],xmm9[3],xmm2[3]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [0,1,10,3,14,7,10,3]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm16, %zmm9, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [4,5,10,11,0,1,10,11,0,1,4,5,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm4, %ymm4
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0],ymm2[1,2],ymm4[3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,12,13,6,7,4,5,2,3,0,1,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm7, %xmm7
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm2[0,1,2],ymm7[3,4,5,6,7],ymm2[8,9,10],ymm7[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm7[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm0 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [0,1,10,3,14,7,10,3]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm17, %zmm7, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [4,5,10,11,0,1,10,11,0,1,4,5,0,1,14,15,20,21,26,27,16,17,26,27,16,17,20,21,16,17,30,31]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm5, %ymm5
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0],ymm2[1,2],ymm5[3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,2,3,12,13,6,7,4,5,2,3,0,1,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm2[0,1,2],ymm4[3,4,5,6,7],ymm2[8,9,10],ymm4[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm0 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm2, %zmm0, %zmm0 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm21, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm12[3],ymm2[4,5],ymm12[6],ymm2[7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm7
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm7, %xmm6
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm4, %xmm4
 ; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,4,6,7]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
-; AVX512DQ-FAST-NEXT:    vpermd %zmm19, %zmm5, %zmm5
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm5, %ymm5
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm6, %zmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm4
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm7 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, (%rsp), %zmm0, %zmm12 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm5[2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm3[0],ymm10[1],ymm3[2,3,4],ymm10[5],ymm3[6,7]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm8
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0],xmm5[1],xmm8[2],xmm5[3],xmm8[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm5, %xmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm5
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1],ymm3[2],ymm5[3,4],ymm3[5],ymm5[6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm12 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0],ymm11[1],ymm3[2,3,4],ymm11[5],ymm3[6,7]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm10
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm10[0],xmm4[1],xmm10[2],xmm4[3],xmm10[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm15[0,1],ymm3[2],ymm15[3,4],ymm3[5],ymm15[6,7]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm5, %xmm3
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm5
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,3,1,3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm5 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpermd %zmm26, %zmm9, %zmm8
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm8, %ymm8
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm8[0],ymm3[1,2],ymm8[3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermd %zmm28, %zmm7, %zmm7
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm7, %ymm7
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm7[0],ymm3[1,2],ymm7[3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm3[0,1,2],ymm4[3,4,5,6,7],ymm3[8,9,10],ymm4[11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm4[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm2 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm2 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm3, %zmm0, %zmm2 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm4 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm8 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm7 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm6 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm7 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm8 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm3, %zmm12
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm8, %zmm3, %zmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, (%rsi)
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm7, %zmm3, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, (%rsi)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 64(%rsi)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, 64(%rdx)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, (%rdx)
@@ -15797,16 +15805,15 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm3, 64(%r8)
 ; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovaps %zmm3, (%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, 64(%r9)
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm3, (%r9)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, 64(%r9)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm31, (%r9)
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, 64(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm14, 64(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512DQ-FAST-NEXT:    addq $1288, %rsp # imm = 0x508
+; AVX512DQ-FAST-NEXT:    addq $1224, %rsp # imm = 0x4C8
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-5.ll
index 6bd87dfddf05e..d09672664d72a 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-5.ll
@@ -164,19 +164,20 @@ define void @load_i64_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %xmm4
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm8 = xmm4[0,1],xmm8[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm8
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[0],ymm0[0],ymm4[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm5 = xmm5[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm5 = xmm5[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm6[0],xmm9[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm6[0],xmm8[1]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovapd %ymm3, (%rsi)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm2, (%rdx)
-; AVX1-ONLY-NEXT:    vmovaps %xmm8, 16(%rcx)
-; AVX1-ONLY-NEXT:    vmovdqa %xmm7, (%rcx)
+; AVX1-ONLY-NEXT:    vmovaps %ymm7, (%rcx)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm4, (%r8)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm0, (%r9)
 ; AVX1-ONLY-NEXT:    vzeroupper
@@ -202,7 +203,8 @@ define void @load_i64_stride5_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 96(%rdi), %xmm5
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm5 = xmm5[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm5[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm5[2,3]
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm5 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm7 = ymm2[8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7],ymm2[24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,3]
@@ -434,10 +436,8 @@ define void @load_i64_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY:       # %bb.0:
 ; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm1
 ; AVX1-ONLY-NEXT:    vmovapd 256(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 224(%rdi), %ymm9
 ; AVX1-ONLY-NEXT:    vmovapd 96(%rdi), %ymm2
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 64(%rdi), %ymm7
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm7[0,1,2],ymm2[3]
 ; AVX1-ONLY-NEXT:    vmovapd (%rdi), %xmm10
@@ -445,9 +445,10 @@ define void @load_i64_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vmovapd 32(%rdi), %xmm4
 ; AVX1-ONLY-NEXT:    vmovdqa 48(%rdi), %xmm11
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm5 = xmm10[0],xmm4[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm5[0,1],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm6 = ymm9[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd %ymm0, %ymm3
 ; AVX1-ONLY-NEXT:    vmovapd 192(%rdi), %xmm5
 ; AVX1-ONLY-NEXT:    vmovapd 160(%rdi), %xmm12
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm13 = xmm12[0],xmm5[1]
@@ -457,59 +458,58 @@ define void @load_i64_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm7 = ymm7[0],ymm14[0],ymm7[3],ymm14[2]
 ; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm10 = xmm10[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm10[0,1],ymm7[2,3]
-; AVX1-ONLY-NEXT:    vmovaps 288(%rdi), %xmm10
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm14
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm9 = ymm9[0],ymm14[0],ymm9[3],ymm14[2]
-; AVX1-ONLY-NEXT:    vmovdqa 208(%rdi), %xmm14
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm12 = xmm12[8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm9 = ymm12[0,1],ymm9[2,3]
+; AVX1-ONLY-NEXT:    vmovaps 288(%rdi), %xmm14
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm14, %ymm0, %ymm10
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm9 = ymm9[0],ymm10[0],ymm9[3],ymm10[2]
+; AVX1-ONLY-NEXT:    vmovdqa 208(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm10 = xmm12[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3]
 ; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm13 = xmm12[0,1],xmm13[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm10 = xmm12[0,1],xmm13[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm10
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm8[0,1,2,3],xmm11[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovaps 256(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm13 = xmm11[0,1],xmm14[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm13, %ymm0, %ymm13
+; AVX1-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm14
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm15 = xmm14[0,1,2,3],xmm15[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm13 = ymm15[0,1,2,3],ymm13[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm12
-; AVX1-ONLY-NEXT:    vmovapd %ymm1, %ymm2
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm12 = ymm12[0],ymm1[0],ymm12[3],ymm1[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm15
 ; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm8 = xmm8[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm8 = ymm8[0,1],ymm12[2,3]
-; AVX1-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm14 = xmm12[0,1,2,3],xmm14[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovaps 256(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm10 = xmm0[0,1],xmm10[2,3]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vmovapd 288(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 224(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm12 = xmm12[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm12[0,1],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm12 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm12 = mem[0,1,2],ymm2[3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = xmm4[0],xmm15[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0,1],ymm12[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm5[0],xmm1[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 288(%rdi), %ymm12
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm11
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm11 = ymm11[0],ymm12[0],ymm11[3],ymm12[2]
+; AVX1-ONLY-NEXT:    vmovdqa 224(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm14 = xmm14[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm11 = ymm14[0,1],ymm11[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm2 = xmm4[0],xmm15[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1,2],ymm12[3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm0 = xmm5[0],xmm0[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vmovapd %ymm6, 32(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm1, (%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm2, (%rsi)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm9, 32(%rdx)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm7, (%rdx)
-; AVX1-ONLY-NEXT:    vmovaps %xmm10, 48(%rcx)
-; AVX1-ONLY-NEXT:    vmovdqa %xmm14, 32(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps %xmm13, 16(%rcx)
-; AVX1-ONLY-NEXT:    vmovdqa %xmm11, (%rcx)
-; AVX1-ONLY-NEXT:    vmovapd %ymm3, 32(%r8)
+; AVX1-ONLY-NEXT:    vmovaps %ymm13, 32(%rcx)
+; AVX1-ONLY-NEXT:    vmovaps %ymm10, (%rcx)
+; AVX1-ONLY-NEXT:    vmovapd %ymm11, 32(%r8)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm8, (%r8)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm0, 32(%r9)
-; AVX1-ONLY-NEXT:    vmovapd %ymm4, (%r9)
+; AVX1-ONLY-NEXT:    vmovapd %ymm1, (%r9)
 ; AVX1-ONLY-NEXT:    vzeroupper
 ; AVX1-ONLY-NEXT:    retq
 ;
 ; AVX2-ONLY-LABEL: load_i64_stride5_vf8:
 ; AVX2-ONLY:       # %bb.0:
-; AVX2-ONLY-NEXT:    vmovaps 160(%rdi), %ymm9
-; AVX2-ONLY-NEXT:    vmovaps (%rdi), %ymm11
+; AVX2-ONLY-NEXT:    vmovdqa 160(%rdi), %ymm9
+; AVX2-ONLY-NEXT:    vmovdqa (%rdi), %ymm11
 ; AVX2-ONLY-NEXT:    vmovdqa 288(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm2
 ; AVX2-ONLY-NEXT:    vmovdqa 256(%rdi), %ymm1
@@ -538,14 +538,16 @@ define void @load_i64_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm12 = ymm12[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,1,2,1]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],mem[2,3],ymm11[4,5],mem[6,7]
-; AVX2-ONLY-NEXT:    vmovaps 96(%rdi), %xmm13
-; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm13 = xmm13[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm11[2,3],ymm13[0,1]
-; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],mem[2,3],ymm9[4,5],mem[6,7]
-; AVX2-ONLY-NEXT:    vmovaps 256(%rdi), %xmm13
-; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm13 = xmm13[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm9[2,3],ymm13[0,1]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1],mem[2,3],ymm11[4,5],mem[6,7]
+; AVX2-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm13
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm13 = xmm13[0,1],mem[2,3]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm11 = ymm11[2,3],ymm13[2,3]
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1],mem[2,3],ymm9[4,5],mem[6,7]
+; AVX2-ONLY-NEXT:    vmovdqa 256(%rdi), %xmm13
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm13 = xmm13[0,1],mem[2,3]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm9 = ymm9[2,3],ymm13[2,3]
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm13 = mem[8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7]
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm14 = ymm3[8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,0,3]
@@ -566,8 +568,8 @@ define void @load_i64_stride5_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vmovaps %ymm1, (%rsi)
 ; AVX2-ONLY-NEXT:    vmovdqa %ymm12, 32(%rdx)
 ; AVX2-ONLY-NEXT:    vmovdqa %ymm8, (%rdx)
-; AVX2-ONLY-NEXT:    vmovaps %ymm9, 32(%rcx)
-; AVX2-ONLY-NEXT:    vmovaps %ymm11, (%rcx)
+; AVX2-ONLY-NEXT:    vmovdqa %ymm9, 32(%rcx)
+; AVX2-ONLY-NEXT:    vmovdqa %ymm11, (%rcx)
 ; AVX2-ONLY-NEXT:    vmovdqa %ymm4, 32(%r8)
 ; AVX2-ONLY-NEXT:    vmovdqa %ymm13, (%r8)
 ; AVX2-ONLY-NEXT:    vmovdqa %ymm0, 32(%r9)
@@ -893,178 +895,174 @@ define void @load_i64_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX1-ONLY-LABEL: load_i64_stride5_vf16:
 ; AVX1-ONLY:       # %bb.0:
-; AVX1-ONLY-NEXT:    subq $440, %rsp # imm = 0x1B8
+; AVX1-ONLY-NEXT:    subq $360, %rsp # imm = 0x168
 ; AVX1-ONLY-NEXT:    vmovapd 96(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 64(%rdi), %ymm8
-; AVX1-ONLY-NEXT:    vmovapd 576(%rdi), %ymm6
-; AVX1-ONLY-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 544(%rdi), %ymm9
-; AVX1-ONLY-NEXT:    vmovapd 256(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 64(%rdi), %ymm5
+; AVX1-ONLY-NEXT:    vmovapd 576(%rdi), %ymm7
+; AVX1-ONLY-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 544(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovapd 256(%rdi), %ymm3
+; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 224(%rdi), %ymm2
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm2[0,1,2],ymm1[3]
-; AVX1-ONLY-NEXT:    vmovapd 192(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3]
+; AVX1-ONLY-NEXT:    vmovapd 192(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vmovapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 160(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm5 = xmm4[0],xmm1[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm5[0,1],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm9[0,1,2],ymm6[3]
-; AVX1-ONLY-NEXT:    vmovapd 512(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 480(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm5 = xmm6[0],xmm1[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm5[0,1],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm8[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd (%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm6 = xmm4[0],xmm6[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm6[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm1[0,1,2],ymm7[3]
+; AVX1-ONLY-NEXT:    vmovapd 512(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vmovapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 480(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm7 = xmm9[0],xmm6[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm7[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm5[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd (%rdi), %xmm13
 ; AVX1-ONLY-NEXT:    vmovapd 32(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm5 = xmm7[0],xmm0[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm5[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm11 = xmm13[0],xmm0[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm11[0,1],ymm3[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 416(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 384(%rdi), %ymm13
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm13[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 352(%rdi), %xmm15
-; AVX1-ONLY-NEXT:    vmovapd 320(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm5 = xmm1[0],xmm15[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm5[0,1],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 288(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm3
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[3],ymm3[2]
-; AVX1-ONLY-NEXT:    vmovdqa 208(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm3 = xmm4[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 608(%rdi), %xmm11
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm9[0],ymm2[0],ymm9[3],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 528(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm3 = xmm6[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 384(%rdi), %ymm3
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm15 = ymm3[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 352(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vmovapd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 320(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm14 = xmm0[0],xmm6[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm6 = ymm14[0,1],ymm15[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 288(%rdi), %xmm14
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm14, %ymm0, %ymm15
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[0],ymm15[0],ymm2[3],ymm15[2]
+; AVX1-ONLY-NEXT:    vmovdqa 208(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm4 = xmm4[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 128(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm3
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm6 = ymm8[0],ymm3[0],ymm8[3],ymm3[2]
-; AVX1-ONLY-NEXT:    vmovdqa 48(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm7[0,1],ymm6[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 448(%rdi), %xmm10
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm6
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm6 = ymm13[0],ymm6[0],ymm13[3],ymm6[2]
-; AVX1-ONLY-NEXT:    vmovdqa 368(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm6[2,3]
+; AVX1-ONLY-NEXT:    vmovaps 608(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[3],ymm4[2]
+; AVX1-ONLY-NEXT:    vmovdqa 528(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm9 = xmm9[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm9[0,1],ymm1[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 256(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm3 = xmm0[0,1],xmm5[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovapd 288(%rdi), %ymm7
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm7[0],ymm0[3],ymm7[2]
-; AVX1-ONLY-NEXT:    vmovdqa 224(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovaps 128(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm9
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm5[0],ymm9[0],ymm5[3],ymm9[2]
+; AVX1-ONLY-NEXT:    vmovdqa 48(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm13 = xmm13[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm13[0,1],ymm5[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, (%rsp) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 448(%rdi), %xmm13
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm13, %ymm0, %ymm5
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[3],ymm5[2]
+; AVX1-ONLY-NEXT:    vmovdqa 368(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 496(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 576(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm3 = xmm1[0,1],xmm11[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vmovaps 256(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm3 = xmm4[0,1],xmm14[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX1-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm14 = xmm11[0,1,2,3],xmm15[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm14[0,1,2,3],ymm3[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 576(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vmovdqa 496(%rdi), %xmm5
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm7 = xmm5[0,1,2,3],xmm7[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm15 = ymm7[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm7[0,1],xmm6[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm10
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0,1,2,3],xmm9[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm9[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovaps 416(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm13 = xmm2[0,1],xmm13[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm13, %ymm0, %ymm13
+; AVX1-ONLY-NEXT:    vmovdqa 336(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm8 = xmm12[0,1,2,3],xmm8[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm13 = ymm8[0,1,2,3],ymm13[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX1-ONLY-NEXT:    vmovapd 288(%rdi), %ymm8
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[0],ymm8[0],ymm4[3],ymm8[2]
+; AVX1-ONLY-NEXT:    vmovdqa 224(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm9 = xmm11[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm9 = ymm9[0,1],ymm4[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
 ; AVX1-ONLY-NEXT:    vmovapd 608(%rdi), %ymm11
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm11[0],ymm1[3],ymm11[2]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0],ymm11[0],ymm3[3],ymm11[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 544(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm12 = xmm0[0,1],xmm9[2,3]
-; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm13 = xmm9[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm3
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[3],ymm3[2]
-; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm9 = xmm9[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm14 = ymm9[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovdqa 336(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm0[0,1,2,3],xmm8[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovaps 416(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm10 = xmm8[0,1],xmm10[2,3]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm8
-; AVX1-ONLY-NEXT:    vmovapd 448(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm8 = ymm8[0],ymm1[0],ymm8[3],ymm1[2]
-; AVX1-ONLY-NEXT:    vmovdqa 384(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm8 = ymm0[0,1],ymm8[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm0 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm7[3]
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm5 = xmm5[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm3
+; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm7
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0],ymm7[0],ymm3[3],ymm7[2]
+; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm10 = xmm10[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm10[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vmovapd 448(%rdi), %ymm10
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[0],ymm10[0],ymm2[3],ymm10[2]
+; AVX1-ONLY-NEXT:    vmovdqa 384(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm12 = xmm12[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm12[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm8 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm8 = mem[0,1,2],ymm8[3]
 ; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm6 = mem[0,1,2,3],xmm6[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm6 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm6 = mem[0,1,2],ymm11[3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm6 = ymm6[0,1],ymm8[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm8 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm8 = mem[0,1,2],ymm11[3]
 ; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm4 = mem[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0,1],ymm6[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm1 = mem[0,1,2],ymm1[3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm2 = xmm15[0],xmm2[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm2 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm2 = mem[0,1,2],ymm3[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm3 = mem[0,1,2,3],xmm5[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm3, 64(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm3, (%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm3, 96(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm3, 32(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm3, 64(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm3, (%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm3, 96(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm3, 32(%rdx)
-; AVX1-ONLY-NEXT:    vmovaps %xmm10, 80(%rcx)
-; AVX1-ONLY-NEXT:    vmovdqa %xmm9, 64(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps %xmm12, 16(%rcx)
-; AVX1-ONLY-NEXT:    vmovdqa %xmm13, (%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, 112(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, 96(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, 48(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, 32(%rcx)
-; AVX1-ONLY-NEXT:    vmovapd %ymm8, 64(%r8)
-; AVX1-ONLY-NEXT:    vmovapd %ymm14, (%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm3, 96(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm3, 32(%r8)
-; AVX1-ONLY-NEXT:    vmovapd %ymm2, (%r9)
-; AVX1-ONLY-NEXT:    vmovapd %ymm1, 64(%r9)
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0,1],ymm8[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm8 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm8 = mem[0,1,2],ymm10[3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm0 = mem[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm8[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm7 = mem[0,1,2],ymm7[3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm7, 64(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm7, (%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm7, 96(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm7, 32(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm7, 64(%rdx)
+; AVX1-ONLY-NEXT:    vmovups (%rsp), %ymm7 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm7, (%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm7, 96(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm7, 32(%rdx)
+; AVX1-ONLY-NEXT:    vmovaps %ymm13, 64(%rcx)
+; AVX1-ONLY-NEXT:    vmovaps %ymm14, (%rcx)
+; AVX1-ONLY-NEXT:    vmovaps %ymm15, 96(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm7, 32(%rcx)
+; AVX1-ONLY-NEXT:    vmovapd %ymm2, 64(%r8)
+; AVX1-ONLY-NEXT:    vmovapd %ymm3, (%r8)
+; AVX1-ONLY-NEXT:    vmovapd %ymm5, 96(%r8)
+; AVX1-ONLY-NEXT:    vmovapd %ymm9, 32(%r8)
+; AVX1-ONLY-NEXT:    vmovapd %ymm1, (%r9)
+; AVX1-ONLY-NEXT:    vmovapd %ymm0, 64(%r9)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm4, 96(%r9)
-; AVX1-ONLY-NEXT:    vmovapd %ymm0, 32(%r9)
-; AVX1-ONLY-NEXT:    addq $440, %rsp # imm = 0x1B8
+; AVX1-ONLY-NEXT:    vmovapd %ymm6, 32(%r9)
+; AVX1-ONLY-NEXT:    addq $360, %rsp # imm = 0x168
 ; AVX1-ONLY-NEXT:    vzeroupper
 ; AVX1-ONLY-NEXT:    retq
 ;
@@ -1135,30 +1133,34 @@ define void @load_i64_stride5_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,2,1]
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm5[4,5,6,7]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovaps 160(%rdi), %ymm4
-; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],mem[2,3],ymm4[4,5],mem[6,7]
-; AVX2-ONLY-NEXT:    vmovaps 256(%rdi), %xmm5
-; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm5 = xmm5[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm4[2,3],ymm5[0,1]
-; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovaps 480(%rdi), %ymm4
-; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],mem[2,3],ymm4[4,5],mem[6,7]
-; AVX2-ONLY-NEXT:    vmovaps 576(%rdi), %xmm5
-; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm5 = xmm5[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm4[2,3],ymm5[0,1]
-; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovaps (%rdi), %ymm4
-; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],mem[2,3],ymm4[4,5],mem[6,7]
-; AVX2-ONLY-NEXT:    vmovaps 96(%rdi), %xmm5
-; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm5 = xmm5[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm4[2,3],ymm5[0,1]
-; AVX2-ONLY-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
-; AVX2-ONLY-NEXT:    vmovaps 320(%rdi), %ymm4
-; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],mem[2,3],ymm4[4,5],mem[6,7]
-; AVX2-ONLY-NEXT:    vmovaps 416(%rdi), %xmm5
-; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm5 = xmm5[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm4[2,3],ymm5[0,1]
-; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vmovdqa 160(%rdi), %ymm4
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],mem[2,3],ymm4[4,5],mem[6,7]
+; AVX2-ONLY-NEXT:    vmovdqa 256(%rdi), %xmm5
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1],mem[2,3]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm4[2,3],ymm5[2,3]
+; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vmovdqa 480(%rdi), %ymm4
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],mem[2,3],ymm4[4,5],mem[6,7]
+; AVX2-ONLY-NEXT:    vmovdqa 576(%rdi), %xmm5
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1],mem[2,3]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm4[2,3],ymm5[2,3]
+; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],mem[2,3],ymm4[4,5],mem[6,7]
+; AVX2-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm5
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1],mem[2,3]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm4[2,3],ymm5[2,3]
+; AVX2-ONLY-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
+; AVX2-ONLY-NEXT:    vmovdqa 320(%rdi), %ymm4
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],mem[2,3],ymm4[4,5],mem[6,7]
+; AVX2-ONLY-NEXT:    vmovdqa 416(%rdi), %xmm5
+; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1],mem[2,3]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm4[2,3],ymm5[2,3]
+; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm4 = ymm12[8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7],ymm12[24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,3]
 ; AVX2-ONLY-NEXT:    vmovdqa 224(%rdi), %xmm15
@@ -1835,401 +1837,395 @@ define void @load_i64_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX1-ONLY-LABEL: load_i64_stride5_vf32:
 ; AVX1-ONLY:       # %bb.0:
-; AVX1-ONLY-NEXT:    subq $1368, %rsp # imm = 0x558
-; AVX1-ONLY-NEXT:    vmovaps 896(%rdi), %ymm4
+; AVX1-ONLY-NEXT:    subq $1336, %rsp # imm = 0x538
+; AVX1-ONLY-NEXT:    vmovaps 896(%rdi), %ymm2
+; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 864(%rdi), %ymm4
 ; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 864(%rdi), %ymm6
-; AVX1-ONLY-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 576(%rdi), %ymm3
-; AVX1-ONLY-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 544(%rdi), %ymm7
-; AVX1-ONLY-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 576(%rdi), %ymm3
+; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 544(%rdi), %ymm14
 ; AVX1-ONLY-NEXT:    vmovapd 256(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 224(%rdi), %ymm10
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm10[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 192(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 160(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm2 = xmm5[0],xmm2[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1,2,3,4,5],ymm3[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 512(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 480(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm3 = xmm1[0,1],xmm3[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm6[0,1,2,3,4,5],ymm4[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 832(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 800(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm3 = xmm9[0,1],xmm3[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1216(%rdi), %ymm2
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 224(%rdi), %ymm11
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm11[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 192(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 160(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm12[0],xmm1[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm14[0,1,2],ymm3[3]
+; AVX1-ONLY-NEXT:    vmovapd 512(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 480(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm15[0],xmm1[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm2[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 832(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 800(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1216(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 1184(%rdi), %ymm6
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm6[0,1,2],ymm2[3]
-; AVX1-ONLY-NEXT:    vmovapd 1152(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vmovapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1120(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = xmm3[0],xmm4[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 1152(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1120(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm2 = xmm7[0],xmm2[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 64(%rdi), %ymm2
 ; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps (%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vmovaps 32(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm7 = xmm4[0,1],xmm0[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 32(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 416(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 384(%rdi), %ymm8
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm8[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm8[0,1,2],ymm0[3]
 ; AVX1-ONLY-NEXT:    vmovapd 352(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 320(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm11 = xmm2[0],xmm0[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm11[0,1],ymm7[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 320(%rdi), %xmm5
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm3 = xmm5[0],xmm0[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 736(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 704(%rdi), %ymm13
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm11 = ymm13[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 704(%rdi), %ymm3
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm9 = ymm3[0,1,2],ymm0[3]
 ; AVX1-ONLY-NEXT:    vmovapd 672(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 640(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm12 = xmm7[0],xmm0[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm11 = ymm12[0,1],ymm11[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1056(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1024(%rdi), %ymm12
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm14 = ymm12[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 992(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 960(%rdi), %xmm11
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm15 = xmm11[0],xmm0[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 288(%rdi), %xmm14
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm14, %ymm0, %ymm15
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm15 = ymm10[0],ymm15[0],ymm10[3],ymm15[2]
-; AVX1-ONLY-NEXT:    vmovdqa 208(%rdi), %xmm10
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm5[8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm15[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 608(%rdi), %xmm15
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm15, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm5[0],ymm0[0],ymm5[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 528(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 928(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovapd 640(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm10 = xmm2[0],xmm0[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1056(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1024(%rdi), %ymm9
+; AVX1-ONLY-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm9[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 992(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 848(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm9 = xmm9[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm9[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovaps 960(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm13 = xmm9[0,1],xmm0[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm13[0,1,2,3],ymm10[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 288(%rdi), %xmm10
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm13
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm13 = ymm11[0],ymm13[0],ymm11[3],ymm13[2]
+; AVX1-ONLY-NEXT:    vmovdqa 208(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm12 = xmm12[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 608(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm13
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm13 = ymm14[0],ymm13[0],ymm14[3],ymm13[2]
+; AVX1-ONLY-NEXT:    vmovdqa 528(%rdi), %xmm14
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm15 = xmm15[8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm13 = ymm15[0,1],ymm13[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 928(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm15, %ymm0, %ymm13
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm13 = ymm0[0],ymm13[0],ymm0[3],ymm13[2]
+; AVX1-ONLY-NEXT:    vmovdqa 848(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm13[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 1248(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm6[0],ymm0[0],ymm6[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1168(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm5 = xmm3[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm6 = ymm6[0],ymm1[0],ymm6[3],ymm1[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1168(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm7[0,1],ymm6[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 128(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm6
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm6 = ymm0[0],ymm6[0],ymm0[3],ymm6[2]
+; AVX1-ONLY-NEXT:    vmovdqa 48(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm4 = xmm4[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm4[0,1],ymm6[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 448(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm4
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm8[0],ymm4[0],ymm8[3],ymm4[2]
+; AVX1-ONLY-NEXT:    vmovdqa 368(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm5[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 768(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 48(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vmovdqa 688(%rdi), %xmm3
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm4 = xmm4[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 448(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm8[0],ymm0[0],ymm8[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 368(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, (%rsp) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 768(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm13[0],ymm0[0],ymm13[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 688(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = xmm7[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, %xmm7
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1088(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm12[0],ymm0[0],ymm12[3],ymm0[2]
+; AVX1-ONLY-NEXT:    vmovaps 1088(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[3],ymm2[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 1008(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = xmm11[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm10[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 256(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm4 = xmm2[0,1],xmm14[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vmovapd 288(%rdi), %ymm13
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[0],ymm13[0],ymm2[3],ymm13[2]
-; AVX1-ONLY-NEXT:    vmovdqa 224(%rdi), %xmm14
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 496(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpblendw $240, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = xmm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 576(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm4 = xmm2[0,1],xmm15[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm3 = xmm9[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 256(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm4[0,1],xmm10[2,3]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vmovapd 608(%rdi), %ymm15
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[0],ymm15[0],ymm2[3],ymm15[2]
-; AVX1-ONLY-NEXT:    vmovdqa 544(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 816(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 896(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = xmm1[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vmovapd 928(%rdi), %ymm2
+; AVX1-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm13
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm13[0,1,2,3],xmm11[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 576(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm6[0,1],xmm12[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm3
+; AVX1-ONLY-NEXT:    vmovdqa 496(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm2[0,1,2,3],xmm14[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm9[0,1,2,3],ymm3[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 896(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm9 = xmm3[0,1],xmm15[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm9
+; AVX1-ONLY-NEXT:    vmovaps 816(%rdi), %xmm5
+; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = xmm5[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm9[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1216(%rdi), %xmm5
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm9 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm9 = xmm5[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm9
+; AVX1-ONLY-NEXT:    vmovdqa 1136(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm11[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm9[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1056(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm9
+; AVX1-ONLY-NEXT:    vmovdqa 976(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm8 = xmm0[0,1,2,3],xmm8[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm9[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm8 = xmm8[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm8
+; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm7 = xmm9[0,1,2,3],xmm7[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 416(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm7 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm7 = xmm9[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX1-ONLY-NEXT:    vmovaps 336(%rdi), %xmm10
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm8 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm8 = xmm10[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 736(%rdi), %xmm14
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm7 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm7 = xmm14[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX1-ONLY-NEXT:    vmovaps 656(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm8 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm8 = xmm12[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX1-ONLY-NEXT:    vmovapd 288(%rdi), %ymm15
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm8 = ymm4[0],ymm15[0],ymm4[3],ymm15[2]
+; AVX1-ONLY-NEXT:    vmovdqa 224(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm13 = xmm13[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm13[0,1],ymm8[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm6
+; AVX1-ONLY-NEXT:    vmovapd 608(%rdi), %ymm4
+; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm13 = ymm6[0],ymm4[0],ymm6[3],ymm4[2]
+; AVX1-ONLY-NEXT:    vmovdqa 544(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm13[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[3],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 864(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 1136(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm9[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1216(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = xmm1[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vmovapd 1248(%rdi), %ymm2
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vmovapd 928(%rdi), %ymm3
+; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[3],ymm3[2]
+; AVX1-ONLY-NEXT:    vmovdqa 864(%rdi), %xmm13
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm3 = mem[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[3],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1184(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vmovapd 1248(%rdi), %ymm3
+; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[3],ymm3[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1184(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm3 = xmm11[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vmovapd 1088(%rdi), %ymm6
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm6[0],ymm1[3],ymm6[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1024(%rdi), %xmm5
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 976(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm9[0,1,2,3],xmm8[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1056(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm0[0,1],xmm3[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vmovapd 1088(%rdi), %ymm10
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm11 = ymm1[0],ymm10[0],ymm1[3],ymm10[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1024(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm9 = xmm9[8,9,10,11,12,13,14,15],xmm12[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm9[0,1],ymm11[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 656(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm9[0,1,2,3],xmm7[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 736(%rdi), %xmm11
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm0 = xmm11[0,1],xmm6[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm8
-; AVX1-ONLY-NEXT:    vmovapd 768(%rdi), %ymm6
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm8 = ymm8[0],ymm6[0],ymm8[3],ymm6[2]
-; AVX1-ONLY-NEXT:    vmovdqa 704(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm9 = xmm9[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm9[0,1],ymm8[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 336(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vpblendw $240, (%rsp), %xmm8, %xmm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm0 = xmm8[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 416(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm11 = xmm9[0,1],xmm5[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm14, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 768(%rdi), %ymm4
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm11 = ymm0[0],ymm4[0],ymm0[3],ymm4[2]
+; AVX1-ONLY-NEXT:    vmovdqa 704(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm12 = xmm12[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm14 = ymm12[0,1],ymm11[2,3]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm9
-; AVX1-ONLY-NEXT:    vmovapd 448(%rdi), %ymm4
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm9 = ymm9[0],ymm4[0],ymm9[3],ymm4[2]
-; AVX1-ONLY-NEXT:    vmovdqa 384(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm8 = xmm8[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm8[0,1],ymm9[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, (%rsp) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm9 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm9 = xmm0[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vpblendw $240, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm8 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm8 = xmm1[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm2[0],ymm0[0],ymm2[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm0 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm13[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1,2,3],xmm14[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm14 = ymm1[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm0 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm4[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1,2,3],xmm5[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 448(%rdi), %ymm2
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm9 = ymm9[0],ymm2[0],ymm9[3],ymm2[2]
+; AVX1-ONLY-NEXT:    vmovdqa 384(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm10 = xmm10[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm11 = ymm10[0,1],ymm9[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm9 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm12
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm9 = ymm9[0],ymm12[0],ymm9[3],ymm12[2]
+; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm10 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm10 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm10 = ymm10[0,1],ymm9[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm9 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm9 = mem[0,1,2],ymm12[3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm0 = mem[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm12 = ymm0[0,1],ymm9[2,3]
 ; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm15[3]
-; AVX1-ONLY-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0],xmm1[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm5 = ymm1[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm6[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1,2,3],xmm7[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm7 = mem[0,1,2,3],xmm7[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm9 = ymm7[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm0 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm2[3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm0 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm10[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1,2,3],xmm12[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm1 = mem[0,1,2,3,4,5],ymm1[6,7]
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm6 = mem[0,1],xmm6[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 192(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 128(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 64(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, (%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 224(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 160(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 96(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 32(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 192(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 128(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 64(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, (%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 224(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 160(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 96(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 32(%rdx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, 208(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, 192(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, 144(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, 128(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps %xmm11, 80(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, 64(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps %xmm9, 16(%rcx)
-; AVX1-ONLY-NEXT:    vmovdqa %xmm8, (%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, 240(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, 224(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, 176(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, 160(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, 112(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, 96(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, 48(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, 32(%rcx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, (%r8)
-; AVX1-ONLY-NEXT:    vmovups (%rsp), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 64(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 128(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 192(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 224(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 160(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 96(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 32(%r8)
-; AVX1-ONLY-NEXT:    vmovaps %ymm1, 224(%r9)
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1,2,3],xmm8[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm8 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm0 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm4[3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = mem[0,1,2,3],xmm13[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm6[3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = mem[0,1,2,3],xmm5[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm2 = mem[0,1,2,3,4,5],ymm2[6,7]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = mem[0,1],xmm4[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 192(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 128(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 64(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, (%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 224(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 160(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 96(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 32(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 192(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 128(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 64(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, (%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 224(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 160(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 96(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 32(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 128(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 64(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, (%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 192(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 224(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 160(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 96(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 32(%rcx)
+; AVX1-ONLY-NEXT:    vmovapd %ymm10, (%r8)
+; AVX1-ONLY-NEXT:    vmovapd %ymm11, 64(%r8)
+; AVX1-ONLY-NEXT:    vmovapd %ymm14, 128(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 192(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 224(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 160(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 96(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 32(%r8)
+; AVX1-ONLY-NEXT:    vmovaps %ymm2, 224(%r9)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm0, 192(%r9)
 ; AVX1-ONLY-NEXT:    vmovaps %ymm3, 160(%r9)
-; AVX1-ONLY-NEXT:    vmovapd %ymm2, 128(%r9)
-; AVX1-ONLY-NEXT:    vmovapd %ymm5, 96(%r9)
-; AVX1-ONLY-NEXT:    vmovapd %ymm4, 64(%r9)
-; AVX1-ONLY-NEXT:    vmovapd %ymm14, 32(%r9)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, (%r9)
-; AVX1-ONLY-NEXT:    addq $1368, %rsp # imm = 0x558
+; AVX1-ONLY-NEXT:    vmovapd %ymm1, 128(%r9)
+; AVX1-ONLY-NEXT:    vmovaps %ymm8, 96(%r9)
+; AVX1-ONLY-NEXT:    vmovapd %ymm7, 64(%r9)
+; AVX1-ONLY-NEXT:    vmovapd %ymm9, 32(%r9)
+; AVX1-ONLY-NEXT:    vmovapd %ymm12, (%r9)
+; AVX1-ONLY-NEXT:    addq $1336, %rsp # imm = 0x538
 ; AVX1-ONLY-NEXT:    vzeroupper
 ; AVX1-ONLY-NEXT:    retq
 ;
@@ -2393,49 +2389,57 @@ define void @load_i64_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 256(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 480(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 576(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 800(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 896(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 1120(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 1216(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 960(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 1056(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 96(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 320(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 416(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 640(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 736(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
 ; AVX2-ONLY-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
@@ -3840,857 +3844,874 @@ define void @load_i64_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX1-ONLY-LABEL: load_i64_stride5_vf64:
 ; AVX1-ONLY:       # %bb.0:
-; AVX1-ONLY-NEXT:    subq $3192, %rsp # imm = 0xC78
-; AVX1-ONLY-NEXT:    vmovaps 736(%rdi), %ymm4
-; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 704(%rdi), %ymm5
+; AVX1-ONLY-NEXT:    subq $3288, %rsp # imm = 0xCD8
+; AVX1-ONLY-NEXT:    vmovaps 896(%rdi), %ymm5
 ; AVX1-ONLY-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 416(%rdi), %ymm2
-; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 384(%rdi), %ymm3
+; AVX1-ONLY-NEXT:    vmovaps 864(%rdi), %ymm6
+; AVX1-ONLY-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 576(%rdi), %ymm4
+; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 544(%rdi), %ymm7
+; AVX1-ONLY-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 256(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 224(%rdi), %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 192(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vmovapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 160(%rdi), %xmm13
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm3 = xmm13[0],xmm3[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm7[0,1,2,3,4,5],ymm4[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 512(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 480(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm4 = xmm1[0,1],xmm4[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm5[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 832(%rdi), %xmm5
+; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 800(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm5 = xmm3[0,1],xmm5[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1216(%rdi), %ymm4
+; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1184(%rdi), %ymm5
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm6 = ymm5[0,1,2],ymm4[3]
+; AVX1-ONLY-NEXT:    vmovapd 1152(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vmovapd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1120(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm7 = xmm4[0],xmm7[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1536(%rdi), %ymm6
+; AVX1-ONLY-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1504(%rdi), %ymm7
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm8 = ymm7[0,1,2],ymm6[3]
+; AVX1-ONLY-NEXT:    vmovapd 1472(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vmovapd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1440(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm9 = xmm6[0],xmm9[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1856(%rdi), %ymm8
+; AVX1-ONLY-NEXT:    vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1824(%rdi), %ymm9
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm10 = ymm9[0,1,2],ymm8[3]
+; AVX1-ONLY-NEXT:    vmovapd 1792(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vmovapd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1760(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm11 = xmm8[0],xmm11[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2176(%rdi), %ymm10
+; AVX1-ONLY-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2144(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 64(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps (%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vmovaps 32(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm6[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm11 = ymm0[0,1,2,3,4,5],ymm10[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 2112(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2080(%rdi), %xmm10
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm12 = xmm10[0,1],xmm12[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2496(%rdi), %ymm11
+; AVX1-ONLY-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2464(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm2[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 352(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm11 = ymm0[0,1,2,3,4,5],ymm11[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 2432(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2400(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 320(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm3 = xmm2[0,1],xmm0[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm12 = xmm0[0,1],xmm12[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3,4,5],ymm4[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 672(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps 64(%rdi), %ymm11
+; AVX1-ONLY-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vmovaps (%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 32(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 640(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm4 = xmm1[0,1],xmm0[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1056(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1024(%rdi), %ymm4
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm5 = ymm4[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 992(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 960(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm7 = xmm3[0],xmm0[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm7[0,1],ymm5[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1376(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1344(%rdi), %ymm7
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm8 = ymm7[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 1312(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1280(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm9 = xmm5[0],xmm0[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm9[0,1],ymm8[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1696(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1664(%rdi), %ymm9
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm10 = ymm9[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 1632(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1600(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm11 = xmm8[0],xmm0[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm11[0,1],ymm10[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 2016(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1984(%rdi), %ymm11
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm12 = ymm11[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 1952(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1920(%rdi), %xmm10
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm13 = xmm10[0],xmm0[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm13[0,1],ymm12[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 2336(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm12 = xmm12[0,1],xmm0[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 416(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 2304(%rdi), %ymm13
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm14 = ymm13[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 2272(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovapd 384(%rdi), %ymm12
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm15 = ymm12[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 352(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 2240(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm15 = xmm12[0],xmm0[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm15[0,1],ymm14[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 256(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 224(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 192(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 160(%rdi), %xmm14
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm15 = xmm14[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 576(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 544(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 512(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 480(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm15 = xmm2[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 896(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 864(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 832(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 800(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm15 = xmm2[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1216(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1184(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 1152(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1120(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm15 = xmm2[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1536(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1504(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 1472(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1440(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm15 = xmm2[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1856(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 320(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm14 = xmm11[0],xmm0[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 736(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1824(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 1792(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1760(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, (%rsp) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm15 = xmm2[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovaps 704(%rdi), %ymm14
+; AVX1-ONLY-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 672(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 640(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm15 = xmm0[0,1],xmm15[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1056(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2176(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovaps 1024(%rdi), %ymm14
+; AVX1-ONLY-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 992(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 960(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm15 = xmm0[0,1],xmm15[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1376(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2144(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 2112(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2080(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps 1344(%rdi), %ymm14
+; AVX1-ONLY-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 1312(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1280(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm15 = xmm0[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm15 = xmm0[0,1],xmm15[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1696(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2496(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovaps 1664(%rdi), %ymm14
+; AVX1-ONLY-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 1632(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1600(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm15 = xmm0[0,1],xmm15[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2016(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2464(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 2432(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vmovaps 1984(%rdi), %ymm14
+; AVX1-ONLY-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 1952(%rdi), %xmm15
 ; AVX1-ONLY-NEXT:    vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2400(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps 1920(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm15 = xmm0[0,1],xmm15[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2336(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 128(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps 2304(%rdi), %ymm14
+; AVX1-ONLY-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 2272(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vmovaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2240(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 48(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm6 = xmm6[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm15 = xmm0[0,1],xmm15[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1,2,3],ymm14[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 288(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm14
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[0],ymm14[0],ymm2[3],ymm14[2]
+; AVX1-ONLY-NEXT:    vmovdqa 208(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm13[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 448(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps 608(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 368(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[3],ymm0[2]
+; AVX1-ONLY-NEXT:    vmovdqa 528(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 768(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps 928(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 688(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa 848(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm3[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1088(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps 1248(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm4[0],ymm0[0],ymm4[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1008(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm5[0],ymm0[0],ymm5[3],ymm0[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1168(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm3[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm4[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1408(%rdi), %xmm15
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm15, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovaps 1568(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm7[0],ymm0[0],ymm7[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1328(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa 1488(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm5[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm6[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1728(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps 1888(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm9[0],ymm0[0],ymm9[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1648(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa 1808(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm8[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2048(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps 2208(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm11[0],ymm0[0],ymm11[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1968(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm10[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
+; AVX1-ONLY-NEXT:    vmovdqa 2128(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm10[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2368(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps 2528(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm13[0],ymm0[0],ymm13[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 2288(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
+; AVX1-ONLY-NEXT:    vmovdqa 2448(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm12[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 288(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovaps 128(%rdi), %xmm14
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm14, %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 208(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm14[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 48(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 608(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovaps 448(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm12[0],ymm0[0],ymm12[3],ymm0[2]
+; AVX1-ONLY-NEXT:    vmovdqa 368(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm11[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 768(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 528(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa 688(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 928(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps 1088(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 848(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1248(%rdi), %xmm14
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm14, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[0],ymm1[0],ymm2[3],ymm1[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1168(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1008(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1408(%rdi), %xmm13
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm13, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1328(%rdi), %xmm5
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1728(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1648(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vpalignr $8, (%rsp), %xmm1, %xmm3 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm3 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1568(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[3],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1488(%rdi), %xmm13
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm3 = mem[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1888(%rdi), %xmm10
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[3],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1808(%rdi), %xmm11
-; AVX1-ONLY-NEXT:    vpalignr $8, (%rsp), %xmm11, %xmm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm3 = mem[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2208(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[3],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 2128(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm3 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2528(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2048(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm3
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm6 = ymm0[0],ymm3[0],ymm0[3],ymm3[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1968(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm7 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm7 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2368(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm6
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm6 = ymm0[0],ymm6[0],ymm0[3],ymm6[2]
+; AVX1-ONLY-NEXT:    vmovdqa 2288(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm7 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm7 = mem[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 256(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm7 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm7 = xmm6[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm10
+; AVX1-ONLY-NEXT:    vmovaps 176(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm11 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm11 = xmm7[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 576(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm10 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm10 = xmm11[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm10
+; AVX1-ONLY-NEXT:    vmovaps 496(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm12 = xmm0[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 896(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm10 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm10 = xmm0[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm10
+; AVX1-ONLY-NEXT:    vmovaps 816(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm12 = xmm0[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1216(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm10 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm10 = xmm0[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm10
+; AVX1-ONLY-NEXT:    vmovaps 1136(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm12 = xmm0[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1536(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm10 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm10 = xmm0[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm10
+; AVX1-ONLY-NEXT:    vmovaps 1456(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm12 = xmm0[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1856(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm10 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm10 = xmm0[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm10
+; AVX1-ONLY-NEXT:    vmovaps 1776(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm12 = xmm0[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2176(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm10 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm10 = xmm0[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm10
+; AVX1-ONLY-NEXT:    vmovaps 2096(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm12 = xmm0[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2496(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm10 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm10 = xmm0[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm10
+; AVX1-ONLY-NEXT:    vmovaps 2416(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm12 = xmm0[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2336(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm8 = xmm0[0,1],xmm8[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm8
+; AVX1-ONLY-NEXT:    vmovdqa 2256(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm0[0,1,2,3],xmm9[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2016(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[3],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 2448(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm3 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = xmm3[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vpblendw $240, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = xmm2[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm5
-; AVX1-ONLY-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[3],ymm5[2]
-; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm2[0,1,2,3],xmm7[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, (%rsp) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 256(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm5 = xmm3[0,1],xmm8[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX1-ONLY-NEXT:    vmovapd 288(%rdi), %ymm5
-; AVX1-ONLY-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[3],ymm5[2]
-; AVX1-ONLY-NEXT:    vmovdqa 224(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 336(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vpblendw $240, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm3 = xmm2[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 416(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vmovdqa 1936(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1696(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovdqa 1616(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1376(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm0 = xmm2[0,1],xmm13[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovdqa 1296(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm5[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1056(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = xmm0[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm3
+; AVX1-ONLY-NEXT:    vmovaps 976(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = xmm1[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 736(%rdi), %xmm3
 ; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm5 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm5 = xmm3[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm8
+; AVX1-ONLY-NEXT:    vmovaps 656(%rdi), %xmm5
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm9 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm9 = xmm5[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 416(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm9 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm9 = xmm8[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm9
+; AVX1-ONLY-NEXT:    vmovaps 336(%rdi), %xmm10
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm12 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm12 = xmm10[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm9 = ymm12[0,1,2,3],ymm9[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm12 = xmm9[0,1],xmm14[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm12
+; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm14
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm15 = xmm14[0,1,2,3],xmm15[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm12 = ymm15[0,1,2,3],ymm12[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm9
+; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm12
+; AVX1-ONLY-NEXT:    vmovupd %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm9 = ymm9[0],ymm12[0],ymm9[3],ymm12[2]
+; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm12 = xmm14[8,9,10,11,12,13,14,15],xmm12[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm9 = ymm12[0,1],ymm9[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm6
+; AVX1-ONLY-NEXT:    vmovapd 288(%rdi), %ymm9
+; AVX1-ONLY-NEXT:    vmovupd %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm6 = ymm6[0],ymm9[0],ymm6[3],ymm9[2]
+; AVX1-ONLY-NEXT:    vmovdqa 224(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm6
+; AVX1-ONLY-NEXT:    vmovapd 448(%rdi), %ymm7
+; AVX1-ONLY-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[3],ymm7[2]
+; AVX1-ONLY-NEXT:    vmovdqa 384(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm7 = xmm10[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm6
+; AVX1-ONLY-NEXT:    vmovapd 608(%rdi), %ymm7
+; AVX1-ONLY-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[3],ymm7[2]
+; AVX1-ONLY-NEXT:    vmovdqa 544(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm7 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX1-ONLY-NEXT:    vmovapd 448(%rdi), %ymm5
+; AVX1-ONLY-NEXT:    vmovapd 768(%rdi), %ymm6
+; AVX1-ONLY-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0],ymm6[0],ymm3[3],ymm6[2]
+; AVX1-ONLY-NEXT:    vmovdqa 704(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm5 = xmm5[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, (%rsp), %ymm0, %ymm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vmovapd 928(%rdi), %ymm5
 ; AVX1-ONLY-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[3],ymm5[2]
-; AVX1-ONLY-NEXT:    vmovdqa 384(%rdi), %xmm5
+; AVX1-ONLY-NEXT:    vmovdqa 864(%rdi), %xmm5
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 496(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm2[0,1,2,3],xmm6[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 576(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm5 = xmm3[0,1],xmm4[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX1-ONLY-NEXT:    vmovapd 608(%rdi), %ymm4
-; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[3],ymm4[2]
-; AVX1-ONLY-NEXT:    vmovdqa 544(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 656(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vpblendw $240, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm3 = xmm2[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 736(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = xmm3[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
-; AVX1-ONLY-NEXT:    vmovapd 768(%rdi), %ymm4
-; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[3],ymm4[2]
-; AVX1-ONLY-NEXT:    vmovdqa 704(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 816(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 896(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm3 = xmm0[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm3, (%rsp) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovapd 928(%rdi), %ymm3
-; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[3],ymm3[2]
-; AVX1-ONLY-NEXT:    vmovdqa 864(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 976(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpblendw $240, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = xmm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1056(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm3 = xmm2[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
 ; AVX1-ONLY-NEXT:    vmovapd 1088(%rdi), %ymm3
 ; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[3],ymm3[2]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[3],ymm3[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 1024(%rdi), %xmm3
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 1136(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vmovapd 1248(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1184(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1216(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm1[0,1],xmm14[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vmovapd 1248(%rdi), %ymm2
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[3],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1184(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 1296(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpblendw $240, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = xmm0[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 1408(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1344(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1376(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm1[0,1],xmm15[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vmovapd 1408(%rdi), %ymm2
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[3],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1344(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm4[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 1456(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm13[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1536(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm1[0,1],xmm12[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    vmovapd 1568(%rdi), %ymm14
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm14[0],ymm1[3],ymm14[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1504(%rdi), %xmm15
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm14[0],ymm0[3],ymm14[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1504(%rdi), %xmm13
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 1616(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpblendw $240, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = xmm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1696(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = xmm1[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    vmovapd 1728(%rdi), %ymm12
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm12[0],ymm1[3],ymm12[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1664(%rdi), %xmm13
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm12[0],ymm0[3],ymm12[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1664(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 1776(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm11[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1856(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm1[0,1],xmm10[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    vmovapd 1888(%rdi), %ymm10
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm10[0],ymm1[3],ymm10[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1824(%rdi), %xmm11
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm10[0],ymm0[3],ymm10[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1824(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 1936(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm9[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2016(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = xmm1[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    vmovapd 2048(%rdi), %ymm8
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm8[0],ymm1[3],ymm8[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1984(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm8[0],ymm0[3],ymm8[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1984(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 2096(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpblendw $240, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = xmm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2176(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = xmm1[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    vmovapd 2208(%rdi), %ymm6
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm6[0],ymm1[3],ymm6[2]
-; AVX1-ONLY-NEXT:    vmovdqa 2144(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm6[0],ymm0[3],ymm6[2]
+; AVX1-ONLY-NEXT:    vmovdqa 2144(%rdi), %xmm5
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 2256(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpblendw $240, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = xmm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2336(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = xmm1[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    vmovapd 2368(%rdi), %ymm4
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[3],ymm4[2]
-; AVX1-ONLY-NEXT:    vmovdqa 2304(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[3],ymm4[2]
+; AVX1-ONLY-NEXT:    vmovdqa 2304(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 2416(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpblendw $240, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = xmm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2496(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = xmm1[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    vmovapd 2528(%rdi), %ymm2
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[3],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 2464(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[3],ymm2[2]
+; AVX1-ONLY-NEXT:    vmovdqa 2464(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm15 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm15 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm15 = mem[0,1],xmm15[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm15 = mem[0,1],xmm15[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm15 = mem[0,1],xmm15[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm15 = mem[0,1],xmm15[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm15 = mem[0,1],xmm15[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm15 = mem[0,1],xmm15[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm15 = mem[0,1],xmm15[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm15 = mem[0,1],xmm15[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1],xmm1[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm15 = mem[0,1],xmm15[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm15 = ymm15[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm14[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm14 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm14 = mem[0,1,2,3],xmm15[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm14 = ymm14[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm13 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm13 = mem[0,1,2,3],xmm13[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm13 = ymm13[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm12[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm12 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm12 = mem[0,1,2,3],xmm13[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm12 = ymm12[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm11 = mem[0,1,2,3],xmm11[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm11 = ymm11[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm10[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm10 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm10 = mem[0,1,2,3],xmm11[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm10 = ymm10[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm9 = mem[0,1,2,3],xmm9[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm9 = ymm9[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm8[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm8 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm8 = mem[0,1,2,3],xmm9[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm8 = ymm8[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm7 = mem[0,1,2,3],xmm7[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm7[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm6[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm6 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm6 = mem[0,1,2,3],xmm7[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm6 = ymm6[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = mem[0,1,2,3],xmm5[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm4[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = mem[0,1,2,3],xmm5[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm2 = mem[0,1,2],ymm2[3]
 ; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm3 = mem[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm3[0,1],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 480(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 416(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 352(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 288(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 224(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 160(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 96(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 32(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 448(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 384(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 320(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 256(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 192(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 128(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 64(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, (%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 480(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 416(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 352(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 288(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 224(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 160(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 96(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 32(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 448(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 384(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 320(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 256(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 192(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 128(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 64(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, (%rdx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 496(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 480(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 432(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 416(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 368(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 352(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 304(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 288(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 240(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 224(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 176(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 160(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 112(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 96(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 48(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps (%rsp), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 32(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 16(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, (%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 80(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 64(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 144(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 128(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 208(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 192(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 272(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 256(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 336(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 320(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 400(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 384(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 464(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 448(%rcx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 480(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 448(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 416(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 384(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 352(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 320(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 288(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 256(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 224(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 192(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 160(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 128(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 96(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 64(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 32(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, (%r8)
-; AVX1-ONLY-NEXT:    vmovapd %ymm4, 480(%r9)
-; AVX1-ONLY-NEXT:    vmovapd %ymm0, 448(%r9)
-; AVX1-ONLY-NEXT:    vmovapd %ymm6, 416(%r9)
-; AVX1-ONLY-NEXT:    vmovapd %ymm8, 384(%r9)
-; AVX1-ONLY-NEXT:    vmovapd %ymm10, 352(%r9)
-; AVX1-ONLY-NEXT:    vmovapd %ymm12, 320(%r9)
-; AVX1-ONLY-NEXT:    vmovapd %ymm14, 288(%r9)
-; AVX1-ONLY-NEXT:    vmovaps %ymm1, 256(%r9)
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm0 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm2[3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 448(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 384(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 320(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 256(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 192(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 128(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 64(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, (%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 480(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 416(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 352(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 288(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 224(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 160(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 96(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 32(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 448(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 384(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 320(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 256(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 192(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 128(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 64(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, (%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 480(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 416(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 352(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 288(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 224(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 160(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 96(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 32(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, (%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 64(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 128(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 192(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 256(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 320(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 384(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 448(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 480(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 416(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 352(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 288(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 224(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 160(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 96(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 32(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 480(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 448(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 416(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 384(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 352(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 320(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 288(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 256(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 224(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 192(%r8)
+; AVX1-ONLY-NEXT:    vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 160(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 128(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 96(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 64(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 32(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, (%r8)
+; AVX1-ONLY-NEXT:    vmovapd %ymm0, 480(%r9)
+; AVX1-ONLY-NEXT:    vmovapd %ymm3, 448(%r9)
+; AVX1-ONLY-NEXT:    vmovapd %ymm5, 416(%r9)
+; AVX1-ONLY-NEXT:    vmovapd %ymm7, 384(%r9)
+; AVX1-ONLY-NEXT:    vmovapd %ymm9, 352(%r9)
+; AVX1-ONLY-NEXT:    vmovapd %ymm11, 320(%r9)
+; AVX1-ONLY-NEXT:    vmovapd %ymm13, 288(%r9)
+; AVX1-ONLY-NEXT:    vmovaps %ymm15, 256(%r9)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 224(%r9)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -4707,7 +4728,7 @@ define void @load_i64_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 32(%r9)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, (%r9)
-; AVX1-ONLY-NEXT:    addq $3192, %rsp # imm = 0xC78
+; AVX1-ONLY-NEXT:    addq $3288, %rsp # imm = 0xCD8
 ; AVX1-ONLY-NEXT:    vzeroupper
 ; AVX1-ONLY-NEXT:    retq
 ;
@@ -5052,97 +5073,113 @@ define void @load_i64_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 256(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 480(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 576(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 800(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 896(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 1120(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 1216(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 1440(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 1536(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 1760(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 1856(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 2080(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 2176(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 2400(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 2496(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 2240(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 2336(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 1920(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 2016(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 1600(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 1696(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 1280(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 1376(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 960(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 1056(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 640(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 736(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps 320(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 416(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovaps (%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3],ymm0[4,5],mem[6,7]
 ; AVX2-ONLY-NEXT:    vmovaps 96(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
-; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm0 # 32-byte Folded Reload
 ; AVX2-ONLY-NEXT:    # ymm0 = mem[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23]

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
index 225d1f72f3912..49a2bfaeb0539 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i64-stride-7.ll
@@ -219,7 +219,8 @@ define void @load_i64_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm7 = ymm7[1],ymm9[0],ymm7[2],ymm9[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm10
 ; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm8 = xmm8[8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm7, %ymm7
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm8
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm7[0,1],ymm8[2,3]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm8
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm8 = ymm2[0,1,2],ymm8[3]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm9 = mem[0],xmm9[1]
@@ -234,7 +235,7 @@ define void @load_i64_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vmovapd %ymm6, (%rsi)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm5, (%rdx)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm4, (%rcx)
-; AVX1-ONLY-NEXT:    vmovaps %ymm7, (%r8)
+; AVX1-ONLY-NEXT:    vmovapd %ymm7, (%r8)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm8, (%r9)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm2, (%r10)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm0, (%rax)
@@ -268,7 +269,8 @@ define void @load_i64_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm5 = mem[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm8
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm9 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm5[2,3],ymm9[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm5[2,3],ymm9[2,3]
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm8 = ymm2[0,1,2,3,4,5],ymm8[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa 32(%rdi), %xmm9
@@ -317,7 +319,8 @@ define void @load_i64_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-SLOW-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
 ; AVX512F-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm8
 ; AVX512F-SLOW-NEXT:    vpalignr {{.*#+}} xmm6 = xmm6[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm7[2,3],ymm6[0,1]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512F-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm7[2,3],ymm6[2,3]
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm7
 ; AVX512F-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm8
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6,7]
@@ -369,7 +372,8 @@ define void @load_i64_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-FAST-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
 ; AVX512F-FAST-NEXT:    vmovdqa 192(%rdi), %xmm8
 ; AVX512F-FAST-NEXT:    vpalignr {{.*#+}} xmm9 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],ymm9[0,1]
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512F-FAST-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],ymm9[2,3]
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm6[0,1,2,3,4,5],ymm8[6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [4,11]
@@ -420,7 +424,8 @@ define void @load_i64_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-SLOW-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
 ; AVX512BW-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm8
 ; AVX512BW-SLOW-NEXT:    vpalignr {{.*#+}} xmm6 = xmm6[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm7[2,3],ymm6[0,1]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512BW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm6 = ymm7[2,3],ymm6[2,3]
 ; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm7
 ; AVX512BW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm8
 ; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],ymm7[6,7]
@@ -472,7 +477,8 @@ define void @load_i64_stride7_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-FAST-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
 ; AVX512BW-FAST-NEXT:    vmovdqa 192(%rdi), %xmm8
 ; AVX512BW-FAST-NEXT:    vpalignr {{.*#+}} xmm9 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
-; AVX512BW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],ymm9[0,1]
+; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512BW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],ymm9[2,3]
 ; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
 ; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm8 = ymm6[0,1,2,3,4,5],ymm8[6,7]
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [4,11]
@@ -683,12 +689,14 @@ define void @load_i64_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm13 = ymm13[1],ymm12[0],ymm13[2],ymm12[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 416(%rdi), %xmm15
 ; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm10 = xmm10[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm13, %ymm10
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm10
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm10 = ymm13[0,1],ymm10[2,3]
 ; AVX1-ONLY-NEXT:    vmovapd 80(%rdi), %xmm13
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm11 = ymm11[1],ymm13[0],ymm11[2],ymm13[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm14 = xmm14[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm14, %ymm11, %ymm11
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm14, %ymm0, %ymm14
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm11 = ymm11[0,1],ymm14[2,3]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm15, %ymm0, %ymm14
 ; AVX1-ONLY-NEXT:    vmovapd 352(%rdi), %ymm15
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm14 = ymm15[0,1,2],ymm14[3]
@@ -726,8 +734,8 @@ define void @load_i64_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vmovapd %ymm6, 32(%rdx)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm9, (%rcx)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm8, 32(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps %ymm11, (%r8)
-; AVX1-ONLY-NEXT:    vmovaps %ymm10, 32(%r8)
+; AVX1-ONLY-NEXT:    vmovapd %ymm11, (%r8)
+; AVX1-ONLY-NEXT:    vmovapd %ymm10, 32(%r8)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm2, (%r9)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm12, 32(%r9)
 ; AVX1-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
@@ -785,11 +793,13 @@ define void @load_i64_stride7_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm12 = mem[8,9,10,11,12,13,14,15],ymm12[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm12[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 416(%rdi), %xmm13
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm14 = mem[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm12[2,3],ymm14[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm12 = ymm12[2,3],ymm14[2,3]
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm10 = mem[8,9,10,11,12,13,14,15],ymm10[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm10[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm14
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm15 = mem[8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm10 = ymm10[2,3],ymm15[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm10 = ymm10[2,3],ymm15[2,3]
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm13 = ymm3[0,1,2,3,4,5],ymm13[6,7]
 ; AVX2-ONLY-NEXT:    vmovdqa 256(%rdi), %xmm15
@@ -1347,7 +1357,7 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX1-ONLY-LABEL: load_i64_stride7_vf16:
 ; AVX1-ONLY:       # %bb.0:
-; AVX1-ONLY-NEXT:    subq $568, %rsp # imm = 0x238
+; AVX1-ONLY-NEXT:    subq $552, %rsp # imm = 0x228
 ; AVX1-ONLY-NEXT:    vmovapd 544(%rdi), %ymm0
 ; AVX1-ONLY-NEXT:    vmovapd 96(%rdi), %ymm1
 ; AVX1-ONLY-NEXT:    vmovaps 768(%rdi), %ymm2
@@ -1365,8 +1375,9 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm2[0,1,2,3,4,5],ymm4[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps 672(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vmovaps 720(%rdi), %xmm15
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm7 = xmm6[0,1],xmm15[2,3]
+; AVX1-ONLY-NEXT:    vmovaps 720(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm7 = xmm6[0,1],xmm7[2,3]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm7[0,1,2,3],ymm4[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 160(%rdi), %ymm0, %ymm4
@@ -1398,179 +1409,182 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[3],ymm4[2]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 160(%rdi), %ymm2
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[3],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm6 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm6[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 160(%rdi), %ymm6
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm6[0],ymm1[3],ymm6[2]
+; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 608(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
-; AVX1-ONLY-NEXT:    vmovdqa 512(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm7[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 608(%rdi), %ymm9
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm9[0],ymm0[3],ymm9[2]
+; AVX1-ONLY-NEXT:    vmovdqa 512(%rdi), %xmm10
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm7[8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 800(%rdi), %xmm7
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3]
 ; AVX1-ONLY-NEXT:    vmovapd 688(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm8 = xmm4[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm8[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm4[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 352(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovdqa 352(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3]
 ; AVX1-ONLY-NEXT:    vmovapd 240(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm9 = xmm5[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm5[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm6[3]
+; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm6[0,1,2,3],xmm8[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 576(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm9[3]
+; AVX1-ONLY-NEXT:    vmovdqa 464(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm12[0,1,2,3],xmm10[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm9[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3]
-; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm2[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 752(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm9 = ymm4[1],ymm11[0],ymm4[2],ymm11[2]
+; AVX1-ONLY-NEXT:    vmovdqa 864(%rdi), %xmm10
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm9[0,1],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, (%rsp) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 304(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm5[1],ymm7[0],ymm5[2],ymm7[2]
+; AVX1-ONLY-NEXT:    vmovdqa 416(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm5[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 576(%rdi), %xmm10
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
-; AVX1-ONLY-NEXT:    vmovdqa 464(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm6 = xmm1[0,1,2,3],xmm6[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 80(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm6[1],ymm8[0],ymm6[2],ymm8[2]
+; AVX1-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm3 = xmm3[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm5[0,1],ymm3[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 752(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[1],ymm6[0],ymm4[2],ymm6[2]
-; AVX1-ONLY-NEXT:    vmovdqa 864(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm4, %ymm0
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 304(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm5[1],ymm4[0],ymm5[2],ymm4[2]
-; AVX1-ONLY-NEXT:    vmovdqa 416(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm8 = xmm8[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm5, %ymm0
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 80(%rdi), %xmm11
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[1],ymm11[0],ymm2[2],ymm11[2]
-; AVX1-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm8 = xmm12[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm2, %ymm0
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 528(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],ymm2[0],ymm1[2],ymm2[2]
+; AVX1-ONLY-NEXT:    vmovapd 528(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm12[1],ymm4[0],ymm12[2],ymm4[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 640(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm8 = xmm10[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm1, %ymm13
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vmovapd 352(%rdi), %ymm10
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm10[0,1,2],ymm1[3]
-; AVX1-ONLY-NEXT:    vmovapd 256(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = xmm7[0],xmm4[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm14 = ymm4[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm13 = ymm5[0,1],ymm1[2,3]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vmovapd 800(%rdi), %ymm3
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3]
-; AVX1-ONLY-NEXT:    vmovapd 704(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = xmm9[0],xmm6[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm6
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm6[0,1,2],ymm1[3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = mem[0],xmm11[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm12 = ymm4[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 352(%rdi), %ymm5
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm5[0,1,2],ymm1[3]
+; AVX1-ONLY-NEXT:    vmovapd 256(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm7 = xmm3[0],xmm7[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm14 = ymm7[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vmovapd 800(%rdi), %ymm10
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm10[0,1,2],ymm1[3]
+; AVX1-ONLY-NEXT:    vmovapd 704(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm11 = xmm7[0],xmm11[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm12 = ymm11[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm9
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm9[0,1,2],ymm1[3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm6 = mem[0],xmm8[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm11 = ymm6[0,1],ymm1[2,3]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovapd 576(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm1[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 480(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm5 = xmm0[0],xmm2[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm8 = ymm5[0,1],ymm4[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 416(%rdi), %ymm5
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm10 = ymm10[0],ymm5[0],ymm10[3],ymm5[2]
-; AVX1-ONLY-NEXT:    vmovdqa 320(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm10 = ymm7[0,1],ymm10[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 864(%rdi), %ymm11
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0],ymm11[0],ymm3[3],ymm11[2]
-; AVX1-ONLY-NEXT:    vmovdqa 768(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm9 = xmm9[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm9[0,1],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vmovdqa 544(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovapd 640(%rdi), %ymm2
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[3],ymm2[2]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm9 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 480(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = xmm8[0],xmm4[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm6 = ymm4[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 416(%rdi), %ymm4
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm5[0],ymm4[0],ymm5[3],ymm4[2]
+; AVX1-ONLY-NEXT:    vmovdqa 320(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm3 = xmm3[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm5 = ymm3[0,1],ymm5[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 864(%rdi), %ymm3
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm10 = ymm10[0],ymm3[0],ymm10[3],ymm3[2]
+; AVX1-ONLY-NEXT:    vmovdqa 768(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm7[0,1],ymm10[2,3]
+; AVX1-ONLY-NEXT:    vmovdqa 544(%rdi), %xmm10
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm8 = xmm8[8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovapd 640(%rdi), %ymm10
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm10[0],ymm1[3],ymm10[2]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm8 = ymm8[0,1],ymm1[2,3]
 ; AVX1-ONLY-NEXT:    vmovapd 192(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm6[0],ymm1[0],ymm6[3],ymm1[2]
-; AVX1-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm6 = mem[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm5 = mem[0,1,2],ymm5[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = mem[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm5 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm5 = mem[0,1,2],ymm11[3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm6 = xmm15[0,1],xmm7[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm15 = ymm6[0,1,2,3],ymm5[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm1 = mem[0,1,2],ymm1[3]
-; AVX1-ONLY-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm5 = xmm5[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm5[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm2 = mem[0,1,2],ymm2[3]
-; AVX1-ONLY-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm5 = xmm5[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, 64(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, (%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, 96(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, 32(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, 64(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, (%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, 96(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, 32(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, 64(%rcx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, (%rcx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, 32(%rcx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, 96(%rcx)
-; AVX1-ONLY-NEXT:    vmovaps %ymm13, 64(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, (%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, 32(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, 96(%r8)
-; AVX1-ONLY-NEXT:    vmovapd %ymm8, 64(%r9)
-; AVX1-ONLY-NEXT:    vmovapd %ymm12, (%r9)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm5, 96(%r9)
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm9 = ymm9[0],ymm1[0],ymm9[3],ymm1[2]
+; AVX1-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm15 = mem[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm9 = ymm15[0,1],ymm9[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm4 = mem[0,1,2],ymm4[3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = mem[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm3 = mem[0,1,2],ymm3[3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm0 = mem[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm15 = ymm0[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm1[3]
+; AVX1-ONLY-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm1[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm1 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm1 = mem[0,1,2],ymm10[3]
+; AVX1-ONLY-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm3 = xmm3[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm3, 64(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm3, (%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm3, 96(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm3, 32(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm3, 64(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm3, (%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm3, 96(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm3, 32(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm3, 64(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm3, (%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm3, 32(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm3, 96(%rcx)
+; AVX1-ONLY-NEXT:    vmovapd %ymm13, 64(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm3, (%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm3, 32(%r8)
+; AVX1-ONLY-NEXT:    vmovups (%rsp), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm3, 96(%r8)
+; AVX1-ONLY-NEXT:    vmovapd %ymm6, 64(%r9)
+; AVX1-ONLY-NEXT:    vmovapd %ymm11, (%r9)
+; AVX1-ONLY-NEXT:    vmovapd %ymm12, 96(%r9)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm14, 32(%r9)
 ; AVX1-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-ONLY-NEXT:    vmovapd %ymm0, (%rax)
-; AVX1-ONLY-NEXT:    vmovapd %ymm9, 64(%rax)
-; AVX1-ONLY-NEXT:    vmovapd %ymm3, 96(%rax)
-; AVX1-ONLY-NEXT:    vmovapd %ymm10, 32(%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm9, (%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm8, 64(%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm7, 96(%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm5, 32(%rax)
 ; AVX1-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-ONLY-NEXT:    vmovapd %ymm2, 64(%rax)
-; AVX1-ONLY-NEXT:    vmovapd %ymm1, (%rax)
-; AVX1-ONLY-NEXT:    vmovaps %ymm15, 96(%rax)
-; AVX1-ONLY-NEXT:    vmovapd %ymm4, 32(%rax)
-; AVX1-ONLY-NEXT:    addq $568, %rsp # imm = 0x238
+; AVX1-ONLY-NEXT:    vmovapd %ymm1, 64(%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm0, (%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm15, 96(%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm2, 32(%rax)
+; AVX1-ONLY-NEXT:    addq $552, %rsp # imm = 0x228
 ; AVX1-ONLY-NEXT:    vzeroupper
 ; AVX1-ONLY-NEXT:    retq
 ;
@@ -1669,25 +1683,29 @@ define void @load_i64_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 864(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 288(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 416(%rdi), %xmm2
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm3 = mem[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm3[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm3[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm3
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm4 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm4[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm4[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 512(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 640(%rdi), %xmm4
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm5 = mem[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm15 = ymm0[2,3],ymm5[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm15 = ymm0[2,3],ymm5[2,3]
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
 ; AVX2-ONLY-NEXT:    vmovdqa 352(%rdi), %ymm7
 ; AVX2-ONLY-NEXT:    vpblendd {{.*#+}} ymm0 = ymm7[0,1,2,3,4,5],ymm0[6,7]
@@ -2749,511 +2767,512 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-LABEL: load_i64_stride7_vf32:
 ; AVX1-ONLY:       # %bb.0:
 ; AVX1-ONLY-NEXT:    subq $1720, %rsp # imm = 0x6B8
-; AVX1-ONLY-NEXT:    vmovaps 1216(%rdi), %ymm4
-; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1216(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 768(%rdi), %ymm5
 ; AVX1-ONLY-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 320(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovaps 320(%rdi), %ymm10
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 384(%rdi), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm10[0,1,2,3,4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps 224(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovaps 272(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovaps 272(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm4 = xmm3[0,1],xmm0[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm2[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 832(%rdi), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 672(%rdi), %xmm13
-; AVX1-ONLY-NEXT:    vmovaps 720(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm13[0,1],xmm2[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 672(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vmovaps 720(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm5 = xmm4[0,1],xmm0[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm2[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 1280(%rdi), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps 1120(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vmovaps 1168(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm6[0,1],xmm2[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovaps 1168(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm5 = xmm6[0,1],xmm0[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm2[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1664(%rdi), %ymm9
-; AVX1-ONLY-NEXT:    vinsertf128 $1, 1728(%rdi), %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm9[0,1,2,3,4,5],ymm2[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 1664(%rdi), %ymm5
+; AVX1-ONLY-NEXT:    vinsertf128 $1, 1728(%rdi), %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3,4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps 1568(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vmovaps 1616(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm4 = xmm7[0,1],xmm4[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 96(%rdi), %ymm8
+; AVX1-ONLY-NEXT:    vmovaps 1616(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm8 = xmm7[0,1],xmm0[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 96(%rdi), %ymm14
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 160(%rdi), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm8[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm14[0,1,2],ymm0[3]
 ; AVX1-ONLY-NEXT:    vmovapd 48(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = mem[0],xmm0[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 544(%rdi), %ymm10
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm8 = mem[0],xmm0[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm8[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 544(%rdi), %ymm9
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 608(%rdi), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm10[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm9[0,1,2],ymm0[3]
 ; AVX1-ONLY-NEXT:    vmovapd 448(%rdi), %xmm11
 ; AVX1-ONLY-NEXT:    vmovapd 496(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = xmm11[0],xmm0[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm8 = xmm11[0],xmm0[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm8[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 992(%rdi), %ymm12
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 1056(%rdi), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm12[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 896(%rdi), %xmm14
+; AVX1-ONLY-NEXT:    vmovapd 896(%rdi), %xmm13
 ; AVX1-ONLY-NEXT:    vmovapd 944(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = xmm14[0],xmm0[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm8 = xmm13[0],xmm0[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm8[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 1440(%rdi), %ymm15
-; AVX1-ONLY-NEXT:    vinsertf128 $1, 1504(%rdi), %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm15[0,1,2],ymm2[3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, 1504(%rdi), %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm8 = ymm15[0,1,2],ymm0[3]
 ; AVX1-ONLY-NEXT:    vmovapd 1344(%rdi), %xmm2
 ; AVX1-ONLY-NEXT:    vmovapd 1392(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm5 = xmm2[0],xmm0[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 288(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm3 = xmm3[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovapd 384(%rdi), %ymm4
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm1[0],ymm4[0],ymm1[3],ymm4[2]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm5[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 736(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm5 = xmm13[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm8[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 288(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm3[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovapd 384(%rdi), %ymm8
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm10[0],ymm8[0],ymm10[3],ymm8[2]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 736(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm4[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovapd 832(%rdi), %ymm3
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm0[0],ymm3[0],ymm0[3],ymm3[2]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm5[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 1184(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm5 = xmm6[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[3],ymm3[2]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 1184(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm6[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovapd 1280(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm6 = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm6[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 1632(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm6 = xmm7[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovapd 1728(%rdi), %ymm5
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm9[0],ymm5[0],ymm9[3],ymm5[2]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[0],ymm1[0],ymm4[3],ymm1[2]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 160(%rdi), %ymm6
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm8[0],ymm6[0],ymm8[3],ymm6[2]
-; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm7 = mem[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm7[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovdqa 1632(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm7[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovapd 1728(%rdi), %ymm4
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm5[0],ymm4[0],ymm5[3],ymm4[2]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 160(%rdi), %ymm5
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm14[0],ymm5[0],ymm14[3],ymm5[2]
+; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm10
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm6 = mem[8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 608(%rdi), %ymm8
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm10[0],ymm8[0],ymm10[3],ymm8[2]
+; AVX1-ONLY-NEXT:    vmovapd 608(%rdi), %ymm14
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm9[0],ymm14[0],ymm9[3],ymm14[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 512(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm10 = xmm11[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm6 = xmm11[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1056(%rdi), %ymm13
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm12[0],ymm13[0],ymm12[3],ymm13[2]
-; AVX1-ONLY-NEXT:    vmovdqa 960(%rdi), %xmm10
-; AVX1-ONLY-NEXT:    vmovdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm10 = xmm14[8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 1056(%rdi), %ymm11
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm12[0],ymm11[0],ymm12[3],ymm11[2]
+; AVX1-ONLY-NEXT:    vmovdqa 960(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm6 = xmm13[8,9,10,11,12,13,14,15],xmm12[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1504(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm10 = ymm15[0],ymm0[0],ymm15[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1408(%rdi), %xmm14
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm10[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 352(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3]
-; AVX1-ONLY-NEXT:    vmovapd 240(%rdi), %xmm10
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = xmm10[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 1504(%rdi), %ymm13
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm6 = ymm15[0],ymm13[0],ymm15[3],ymm13[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1408(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm6[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 800(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3]
-; AVX1-ONLY-NEXT:    vmovapd 688(%rdi), %xmm15
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm3 = xmm15[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovdqa 352(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1,2],ymm8[3]
+; AVX1-ONLY-NEXT:    vmovapd 240(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm8 = xmm9[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm8[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1248(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3]
-; AVX1-ONLY-NEXT:    vmovapd 1136(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm2 = xmm3[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 1696(%rdi), %xmm11
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3]
-; AVX1-ONLY-NEXT:    vmovapd 1584(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm2 = xmm4[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 1472(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovdqa 1360(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm14[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovdqa 800(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3]
+; AVX1-ONLY-NEXT:    vmovapd 688(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm15 = xmm2[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm15[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1248(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm1[3]
+; AVX1-ONLY-NEXT:    vmovapd 1136(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm15 = xmm1[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm15[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1696(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3]
+; AVX1-ONLY-NEXT:    vmovapd 1584(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = xmm15[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 1472(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm13[3]
+; AVX1-ONLY-NEXT:    vmovdqa 1360(%rdi), %xmm13
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 128(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm6[3]
-; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm9[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3]
+; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm10[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 576(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm8[3]
+; AVX1-ONLY-NEXT:    vmovdqa 576(%rdi), %xmm10
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm14[3]
 ; AVX1-ONLY-NEXT:    vmovdqa 464(%rdi), %xmm14
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm14[0,1,2,3],xmm7[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm14[0,1,2,3],xmm7[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 1024(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm5 = ymm0[0,1,2],ymm13[3]
-; AVX1-ONLY-NEXT:    vmovapd 912(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vblendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm6 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm6 = xmm0[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 304(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm10[1],ymm8[0],ymm10[2],ymm8[2]
-; AVX1-ONLY-NEXT:    vmovdqa 416(%rdi), %xmm13
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13, %xmm6 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm6 = mem[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
-; AVX1-ONLY-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 1024(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm0[0,1,2],ymm11[3]
+; AVX1-ONLY-NEXT:    vmovdqa 912(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm0[0,1,2,3],xmm12[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 304(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm9[1],ymm7[0],ymm9[2],ymm7[2]
+; AVX1-ONLY-NEXT:    vmovdqa 416(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm5 = xmm6[8,9,10,11,12,13,14,15],xmm12[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 752(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm6 = ymm15[1],ymm5[0],ymm15[2],ymm5[2]
-; AVX1-ONLY-NEXT:    vmovdqa 864(%rdi), %xmm10
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm7 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm7 = mem[8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-ONLY-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1200(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[1],ymm6[0],ymm3[2],ymm6[2]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[1],ymm5[0],ymm2[2],ymm5[2]
+; AVX1-ONLY-NEXT:    vmovdqa 864(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm4 = xmm8[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1200(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],ymm2[0],ymm1[2],ymm2[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 1312(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm6 = mem[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm3, %ymm3
-; AVX1-ONLY-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1648(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm4[1],ymm3[0],ymm4[2],ymm3[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1760(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm4 = xmm11[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
-; AVX1-ONLY-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1424(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[1],ymm3[0],ymm2[2],ymm3[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1536(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm4 = xmm12[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm2, %ymm2
-; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 976(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm0[1],ymm12[0],ymm0[2],ymm12[2]
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = mem[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1648(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm15[1],ymm1[0],ymm15[2],ymm1[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1760(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = mem[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1424(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm13[1],ymm1[0],ymm13[2],ymm1[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1536(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm2 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm2 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 976(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm0[1],ymm4[0],ymm0[2],ymm4[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 1088(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 528(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm14[1],ymm2[0],ymm14[2],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 640(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm6 = xmm9[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 80(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm6 = ymm6[1],ymm1[0],ymm6[2],ymm1[2]
-; AVX1-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm9 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm9 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm9, %ymm6, %ymm6
-; AVX1-ONLY-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm6
-; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm11
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm6 = ymm11[0,1,2],ymm6[3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = mem[0],xmm1[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm6[2,3]
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = xmm3[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm13, %ymm0, %ymm6
-; AVX1-ONLY-NEXT:    vmovapd 352(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm6 = ymm1[0,1,2],ymm6[3]
+; AVX1-ONLY-NEXT:    vmovapd 528(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm14[1],ymm1[0],ymm14[2],ymm1[2]
+; AVX1-ONLY-NEXT:    vmovdqa 640(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm10 = xmm10[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm10
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm10[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 80(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm10 = ymm10[1],ymm2[0],ymm10[2],ymm2[2]
+; AVX1-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm13 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm13 = mem[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm13, %ymm0, %ymm13
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm10 = ymm10[0,1],ymm13[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm10
+; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm13
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm10 = ymm13[0,1,2],ymm10[3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm2 = mem[0],xmm2[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm10[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm12, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vmovapd 352(%rdi), %ymm11
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm11[0,1,2],ymm2[3]
 ; AVX1-ONLY-NEXT:    vmovapd 256(%rdi), %xmm15
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm7 = xmm15[0],xmm8[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX1-ONLY-NEXT:    vmovapd 576(%rdi), %ymm6
-; AVX1-ONLY-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm6[0,1,2],ymm4[3]
-; AVX1-ONLY-NEXT:    vmovapd 480(%rdi), %xmm14
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm2 = xmm14[0],xmm2[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm7 = xmm15[0],xmm7[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm7[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm4
-; AVX1-ONLY-NEXT:    vmovapd 800(%rdi), %ymm13
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm13[0,1,2],ymm4[3]
-; AVX1-ONLY-NEXT:    vmovapd 704(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm5 = xmm2[0],xmm5[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm5[0,1],ymm4[2,3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vmovapd 576(%rdi), %ymm3
+; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3]
+; AVX1-ONLY-NEXT:    vmovapd 480(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm3[0],xmm1[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vmovapd 800(%rdi), %ymm12
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm12[0,1,2],ymm1[3]
+; AVX1-ONLY-NEXT:    vmovapd 704(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm5 = xmm1[0],xmm5[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovapd 1024(%rdi), %ymm2
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 928(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = xmm9[0],xmm12[1]
+; AVX1-ONLY-NEXT:    vmovapd 1024(%rdi), %ymm14
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm14[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 928(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = xmm2[0],xmm4[1]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovapd 1248(%rdi), %ymm6
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 1152(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vblendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = xmm8[0],mem[1]
+; AVX1-ONLY-NEXT:    vmovapd 1152(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vblendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = xmm9[0],mem[1]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovaps 1472(%rdi), %ymm10
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 1376(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm3 = xmm7[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vmovapd 1696(%rdi), %ymm4
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 1600(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm3 = xmm2[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 192(%rdi), %ymm12
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm11[0],ymm12[0],ymm11[3],ymm12[2]
-; AVX1-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm3 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 416(%rdi), %ymm11
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm11[0],ymm1[3],ymm11[2]
-; AVX1-ONLY-NEXT:    vmovdqa 320(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm15[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 544(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm14[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovapd 640(%rdi), %ymm2
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm14 = ymm1[0],ymm2[0],ymm1[3],ymm2[2]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm14[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 864(%rdi), %ymm15
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm13[0],ymm15[0],ymm13[3],ymm15[2]
-; AVX1-ONLY-NEXT:    vmovdqa 768(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm13 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm13 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 992(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm9 = xmm9[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovapd 1088(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm13 = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm9[0,1],ymm13[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1312(%rdi), %ymm9
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm13 = ymm6[0],ymm9[0],ymm6[3],ymm9[2]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm10[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 1376(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = xmm0[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vmovapd 1696(%rdi), %ymm8
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm8[0,1,2],ymm4[3]
+; AVX1-ONLY-NEXT:    vmovapd 1600(%rdi), %xmm5
+; AVX1-ONLY-NEXT:    vmovapd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm7 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm7 = xmm5[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm7[0,1],ymm4[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 192(%rdi), %ymm7
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm13[0],ymm7[0],ymm13[3],ymm7[2]
+; AVX1-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm13
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm13 = mem[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm13[0,1],ymm4[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 416(%rdi), %ymm13
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm11[0],ymm13[0],ymm11[3],ymm13[2]
+; AVX1-ONLY-NEXT:    vmovdqa 320(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm15 = xmm15[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm15[0,1],ymm4[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 544(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm4 = xmm3[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovapd 640(%rdi), %ymm5
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm15 = ymm3[0],ymm5[0],ymm3[3],ymm5[2]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm4[0,1],ymm15[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 864(%rdi), %ymm4
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm12 = ymm12[0],ymm4[0],ymm12[3],ymm4[2]
+; AVX1-ONLY-NEXT:    vmovdqa 768(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm12[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 992(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovapd 1088(%rdi), %ymm3
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm12 = ymm14[0],ymm3[0],ymm14[3],ymm3[2]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm12[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1312(%rdi), %ymm2
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm12 = ymm6[0],ymm2[0],ymm6[3],ymm2[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 1216(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm8 = xmm8[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm13 = ymm8[0,1],ymm13[2,3]
-; AVX1-ONLY-NEXT:    vmovdqa 1440(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovapd 1536(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm8 = ymm10[0],ymm0[0],ymm10[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm8 = ymm7[0,1],ymm8[2,3]
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm9 = xmm9[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm14 = ymm9[0,1],ymm12[2,3]
+; AVX1-ONLY-NEXT:    vmovdqa 1440(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm0[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovapd 1536(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm9 = ymm10[0],ymm1[0],ymm10[3],ymm1[2]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm9 = ymm0[0,1],ymm9[2,3]
 ; AVX1-ONLY-NEXT:    vmovapd 1760(%rdi), %ymm10
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm7 = ymm4[0],ymm10[0],ymm4[3],ymm10[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1664(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm14 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm14 = mem[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm14[0,1],ymm7[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm12 = mem[0,1,2],ymm12[3]
-; AVX1-ONLY-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm14 = xmm14[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm12 = ymm14[0,1],ymm12[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm11 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm11 = mem[0,1,2],ymm11[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = mem[0,1,2,3],xmm5[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm11[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm2 = mem[0,1,2],ymm2[3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm8 = ymm8[0],ymm10[0],ymm8[3],ymm10[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1664(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm12 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm8 = ymm12[0,1],ymm8[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm7 = mem[0,1,2],ymm7[3]
+; AVX1-ONLY-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm12 = xmm12[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm12 = ymm12[0,1],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm7 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm7 = mem[0,1,2],ymm13[3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm11 = mem[0,1,2,3],xmm11[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm11[0,1],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm5 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm5 = mem[0,1,2],ymm5[3]
 ; AVX1-ONLY-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm11 = xmm11[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm11 = ymm11[0,1],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15, %ymm2 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm2 = mem[0,1,2],ymm15[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm3 = mem[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm15 = ymm3[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm11 = ymm11[0,1],ymm5[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm4 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm4 = mem[0,1,2],ymm4[3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = mem[0,1,2,3],xmm15[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3, %ymm3 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm3 = mem[0,1,2],ymm3[3]
+; AVX1-ONLY-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm5 = xmm5[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm2 = mem[0,1,2],ymm2[3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = mem[0,1,2,3],xmm6[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm5[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm1 = mem[0,1,2],ymm1[3]
-; AVX1-ONLY-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm2 = xmm2[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm2[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm1 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm1 = mem[0,1,2],ymm9[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = mem[0,1,2,3],xmm6[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm1[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm0 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2],ymm10[3]
-; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = mem[0,1,2,3],xmm4[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm4[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 192(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 128(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 64(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, (%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 224(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 160(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 96(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 32(%rsi)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 192(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 128(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 64(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, (%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 224(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 160(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 96(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 32(%rdx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 128(%rcx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 64(%rcx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, (%rcx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 192(%rcx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 224(%rcx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 160(%rcx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 96(%rcx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 32(%rcx)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, (%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 64(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 128(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 192(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 224(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 160(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 96(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 32(%r8)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 224(%r9)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 192(%r9)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 160(%r9)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 128(%r9)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 96(%r9)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 64(%r9)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 32(%r9)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, (%r9)
+; AVX1-ONLY-NEXT:    vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm5 = xmm5[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm5[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vblendpd $7, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm5 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm5 = mem[0,1,2],ymm10[3]
+; AVX1-ONLY-NEXT:    vpblendw $15, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm0 = mem[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3]
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 192(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 128(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 64(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, (%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 224(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 160(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 96(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 32(%rsi)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 192(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 128(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 64(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, (%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 224(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 160(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 96(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 32(%rdx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 128(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 64(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, (%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 192(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 224(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 160(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 96(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 32(%rcx)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, (%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 64(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 128(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 192(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 224(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 160(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 96(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 32(%r8)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 224(%r9)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 192(%r9)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 160(%r9)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 128(%r9)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 96(%r9)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 64(%r9)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 32(%r9)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, (%r9)
 ; AVX1-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-ONLY-NEXT:    vmovapd %ymm7, 224(%rax)
-; AVX1-ONLY-NEXT:    vmovapd %ymm8, 192(%rax)
-; AVX1-ONLY-NEXT:    vmovapd %ymm13, 160(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 128(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 96(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 64(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, 32(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm4, (%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm8, 224(%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm9, 192(%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm14, 160(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 128(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 96(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 64(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, 32(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm5, (%rax)
 ; AVX1-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX1-ONLY-NEXT:    vmovapd %ymm0, 224(%rax)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm1, 192(%rax)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm2, 160(%rax)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm3, 128(%rax)
-; AVX1-ONLY-NEXT:    vmovapd %ymm15, 96(%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm4, 96(%rax)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm11, 64(%rax)
-; AVX1-ONLY-NEXT:    vmovapd %ymm5, 32(%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm7, 32(%rax)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm12, (%rax)
 ; AVX1-ONLY-NEXT:    addq $1720, %rsp # imm = 0x6B8
 ; AVX1-ONLY-NEXT:    vzeroupper
@@ -3451,49 +3470,57 @@ define void @load_i64_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 416(%rdi), %xmm4
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 736(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 864(%rdi), %xmm3
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 1184(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 1312(%rdi), %xmm5
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 1632(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 1760(%rdi), %xmm15
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 1408(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 1536(%rdi), %xmm2
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 960(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 1088(%rdi), %xmm7
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 512(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 640(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm6 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm6[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm6[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 64(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm6
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm8 = mem[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm8[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm8[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm0
 ; AVX2-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm6
@@ -5777,22 +5804,22 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX1-ONLY-LABEL: load_i64_stride7_vf64:
 ; AVX1-ONLY:       # %bb.0:
-; AVX1-ONLY-NEXT:    subq $4232, %rsp # imm = 0x1088
+; AVX1-ONLY-NEXT:    subq $4264, %rsp # imm = 0x10A8
 ; AVX1-ONLY-NEXT:    vmovapd 1216(%rdi), %ymm10
-; AVX1-ONLY-NEXT:    vmovaps 768(%rdi), %ymm7
-; AVX1-ONLY-NEXT:    vmovaps 320(%rdi), %ymm4
+; AVX1-ONLY-NEXT:    vmovaps 768(%rdi), %ymm8
+; AVX1-ONLY-NEXT:    vmovaps 320(%rdi), %ymm13
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 384(%rdi), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 224(%rdi), %xmm13
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm13[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 224(%rdi), %xmm5
 ; AVX1-ONLY-NEXT:    vmovaps 272(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm13[0,1],xmm1[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm5[0,1],xmm1[2,3]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 832(%rdi), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm7[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovaps 672(%rdi), %xmm14
 ; AVX1-ONLY-NEXT:    vmovaps 720(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -5914,72 +5941,72 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1888(%rdi), %ymm12
+; AVX1-ONLY-NEXT:    vmovaps 1888(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 1952(%rdi), %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm12[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 1792(%rdi), %xmm11
-; AVX1-ONLY-NEXT:    vmovapd 1840(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm11[0],xmm1[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 2336(%rdi), %ymm9
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 1792(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vmovaps 1840(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm11[0,1],xmm1[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2336(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 2400(%rdi), %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm9[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 2240(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vmovapd 2288(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm8[0],xmm1[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 2240(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vmovaps 2288(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm1 = xmm7[0,1],xmm1[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 2784(%rdi), %ymm6
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 2848(%rdi), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 2688(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vmovapd 2736(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm5[0],xmm1[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm6[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 2688(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vmovapd 2736(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovapd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm2 = xmm12[0],xmm2[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 3232(%rdi), %ymm3
-; AVX1-ONLY-NEXT:    vinsertf128 $1, 3296(%rdi), %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, 3296(%rdi), %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3]
 ; AVX1-ONLY-NEXT:    vmovapd 3136(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovapd 3184(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 288(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm13[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovapd 384(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[0],ymm0[0],ymm4[3],ymm0[2]
+; AVX1-ONLY-NEXT:    vmovapd 3184(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vmovapd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = xmm2[0],xmm4[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 288(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm5[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovapd 384(%rdi), %ymm5
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm13[0],ymm5[0],ymm13[3],ymm5[2]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm4[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 736(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm4 = xmm14[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovapd 832(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm7 = ymm7[0],ymm0[0],ymm7[3],ymm0[2]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0,1],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 832(%rdi), %ymm9
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm8 = ymm8[0],ymm9[0],ymm8[3],ymm9[2]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0,1],ymm8[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 1184(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm7 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm7 = mem[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm8 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm8 = mem[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovapd 1280(%rdi), %ymm4
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm10 = ymm10[0],ymm4[0],ymm10[3],ymm4[2]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm7[0,1],ymm10[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 1632(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm10 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm10 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovapd 1728(%rdi), %ymm7
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm13 = ymm15[0],ymm7[0],ymm15[3],ymm7[2]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm8 = ymm8[0,1],ymm10[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 1632(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm10 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm10 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovapd 1728(%rdi), %ymm8
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm13 = ymm15[0],ymm8[0],ymm15[3],ymm8[2]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm10 = ymm10[0,1],ymm13[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 2080(%rdi), %xmm10
@@ -6056,140 +6083,141 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 1952(%rdi), %ymm14
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm12 = ymm12[0],ymm14[0],ymm12[3],ymm14[2]
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm15 = ymm0[0],ymm14[0],ymm0[3],ymm14[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 1856(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm11 = xmm11[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm11 = ymm11[0,1],ymm12[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm11 = ymm11[0,1],ymm15[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 2400(%rdi), %ymm12
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm9 = ymm9[0],ymm12[0],ymm9[3],ymm12[2]
-; AVX1-ONLY-NEXT:    vmovdqa 2304(%rdi), %xmm11
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm8 = xmm8[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 2848(%rdi), %ymm8
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm9 = ymm6[0],ymm8[0],ymm6[3],ymm8[2]
+; AVX1-ONLY-NEXT:    vmovapd 2400(%rdi), %ymm11
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm15 = ymm0[0],ymm11[0],ymm0[3],ymm11[2]
+; AVX1-ONLY-NEXT:    vmovdqa 2304(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm7[0,1],ymm15[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 2848(%rdi), %ymm15
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm7 = ymm6[0],ymm15[0],ymm6[3],ymm15[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 2752(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm5 = xmm5[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm9[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 3296(%rdi), %ymm9
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm3[0],ymm9[0],ymm3[3],ymm9[2]
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm12[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 3296(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm7 = ymm3[0],ymm0[0],ymm3[3],ymm0[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 3200(%rdi), %xmm3
 ; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm2 = xmm2[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm5[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm7[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 352(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vblendpd $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm2 = ymm2[0,1,2],mem[3]
-; AVX1-ONLY-NEXT:    vmovapd 240(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm15 = xmm0[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm15[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovaps 352(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1,2],ymm5[3]
+; AVX1-ONLY-NEXT:    vmovapd 240(%rdi), %xmm5
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm7 = xmm5[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm7[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 800(%rdi), %xmm2
 ; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-ONLY-NEXT:    vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm1 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm1 = ymm2[0,1,2,3,4,5],mem[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 688(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1248(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3]
-; AVX1-ONLY-NEXT:    vmovapd 1136(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm2 = xmm2[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 1696(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm7[3]
-; AVX1-ONLY-NEXT:    vmovapd 1584(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1,2],ymm9[3]
+; AVX1-ONLY-NEXT:    vmovapd 688(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm7 = xmm7[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm7[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm2 = xmm2[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2144(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm10[3]
-; AVX1-ONLY-NEXT:    vmovapd 2032(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps 1248(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3]
+; AVX1-ONLY-NEXT:    vmovapd 1136(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = xmm4[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm2 = xmm2[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2592(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm13[3]
-; AVX1-ONLY-NEXT:    vmovapd 2480(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps 1696(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1,2],ymm8[3]
+; AVX1-ONLY-NEXT:    vmovapd 1584(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = xmm4[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm2 = xmm2[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 3040(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3,4,5],mem[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 2928(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps 2144(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1,2],ymm10[3]
+; AVX1-ONLY-NEXT:    vmovapd 2032(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = xmm4[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2592(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0,1,2],ymm13[3]
+; AVX1-ONLY-NEXT:    vmovapd 2480(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm4 = xmm4[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 3040(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm2 = ymm2[0,1,2,3,4,5],mem[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 2928(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm4 = xmm4[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 3488(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3,4,5],mem[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 3376(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps 3488(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm2 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm2 = ymm2[0,1,2,3,4,5],mem[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 3376(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm4 = xmm4[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 3264(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm9[3]
+; AVX1-ONLY-NEXT:    vmovaps 3264(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3]
 ; AVX1-ONLY-NEXT:    vmovdqa 3152(%rdi), %xmm2
 ; AVX1-ONLY-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2816(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm8[3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2816(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3]
 ; AVX1-ONLY-NEXT:    vmovdqa 2704(%rdi), %xmm2
 ; AVX1-ONLY-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 2368(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm12[3]
-; AVX1-ONLY-NEXT:    vmovdqa 2256(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm2[0,1,2,3],xmm11[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 1920(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm14[3]
-; AVX1-ONLY-NEXT:    vmovapd 1808(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vblendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = xmm9[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 1472(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm4
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 2368(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm11[3]
+; AVX1-ONLY-NEXT:    vmovdqa 2256(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm15[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 1920(%rdi), %xmm8
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm0[0,1,2],ymm14[3]
+; AVX1-ONLY-NEXT:    vmovapd 1808(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vblendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm4 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm4 = xmm2[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm4[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 1472(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm4
 ; AVX1-ONLY-NEXT:    vblendpd $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4, %ymm6 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm6 = ymm4[0,1,2],mem[3]
 ; AVX1-ONLY-NEXT:    vmovapd 1360(%rdi), %xmm4
@@ -6199,24 +6227,24 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 1024(%rdi), %xmm6
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm7
-; AVX1-ONLY-NEXT:    vblendpd $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm8 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm8 = ymm7[0,1,2],mem[3]
-; AVX1-ONLY-NEXT:    vmovapd 912(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vblendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm10 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm10 = xmm7[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm8 = ymm10[0,1],ymm8[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 576(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm10
-; AVX1-ONLY-NEXT:    vblendpd $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm10 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm10 = ymm10[0,1,2],mem[3]
+; AVX1-ONLY-NEXT:    vblendpd $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm7 = ymm7[0,1,2],mem[3]
+; AVX1-ONLY-NEXT:    vmovapd 912(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vblendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm10 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm10 = xmm9[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm10[0,1],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 576(%rdi), %xmm10
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm7
+; AVX1-ONLY-NEXT:    vblendpd $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7, %ymm7 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm7 = ymm7[0,1,2],mem[3]
 ; AVX1-ONLY-NEXT:    vmovapd 464(%rdi), %xmm11
 ; AVX1-ONLY-NEXT:    vblendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm12 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm12 = xmm11[0],mem[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm10 = ymm12[0,1],ymm10[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm10
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm12
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm12[0,1],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm12
 ; AVX1-ONLY-NEXT:    vblendpd $8, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm12 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm12 = ymm12[0,1,2],mem[3]
 ; AVX1-ONLY-NEXT:    vmovapd 16(%rdi), %xmm13
@@ -6227,193 +6255,208 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovapd 80(%rdi), %xmm14
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm12 = ymm13[1],ymm14[0],ymm13[2],ymm14[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm13
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm10 = xmm10[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm12, %ymm10
-; AVX1-ONLY-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 304(%rdi), %xmm10
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm10[0],ymm0[2],ymm10[2]
-; AVX1-ONLY-NEXT:    vmovdqa 416(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm5 = xmm5[8,9,10,11,12,13,14,15],xmm12[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 528(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm11[1],ymm12[0],ymm11[2],ymm12[2]
-; AVX1-ONLY-NEXT:    vmovdqa 640(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm8 = xmm8[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm5, %ymm5
-; AVX1-ONLY-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 752(%rdi), %xmm11
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm5[1],ymm11[0],ymm5[2],ymm11[2]
-; AVX1-ONLY-NEXT:    vmovdqa 864(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm8 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm5, %ymm5
-; AVX1-ONLY-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 976(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm7[1],ymm5[0],ymm7[2],ymm5[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1088(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm12[0,1],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 304(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm5[1],ymm12[0],ymm5[2],ymm12[2]
+; AVX1-ONLY-NEXT:    vmovdqa 416(%rdi), %xmm7
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm6 = xmm6[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
-; AVX1-ONLY-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1200(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm5[1],ymm7[0],ymm5[2],ymm7[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1312(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm6 = mem[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm5
-; AVX1-ONLY-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1424(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[1],ymm5[0],ymm4[2],ymm5[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1536(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm1[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm4, %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1648(%rdi), %xmm6
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm7 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 528(%rdi), %xmm13
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm11[1],ymm13[0],ymm11[2],ymm13[2]
+; AVX1-ONLY-NEXT:    vmovdqa 640(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm7 = xmm10[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm0, %ymm7
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 752(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm0[1],ymm7[0],ymm0[2],ymm7[2]
+; AVX1-ONLY-NEXT:    vmovdqa 864(%rdi), %xmm5
+; AVX1-ONLY-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm5[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 976(%rdi), %xmm10
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm9[1],ymm10[0],ymm9[2],ymm10[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1088(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm5 = xmm6[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm5[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1200(%rdi), %xmm6
 ; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],ymm6[0],ymm1[2],ymm6[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1760(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm4 = mem[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 1872(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm9[1],ymm4[0],ymm9[2],ymm4[2]
-; AVX1-ONLY-NEXT:    vmovdqa 1984(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm3 = xmm3[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 2096(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],ymm9[0],ymm1[2],ymm9[2]
-; AVX1-ONLY-NEXT:    vmovdqa 2208(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm3 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 2320(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm2[1],ymm3[0],ymm2[2],ymm3[2]
-; AVX1-ONLY-NEXT:    vmovdqa 2432(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = mem[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 2544(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],ymm2[0],ymm1[2],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 2656(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = mem[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 2768(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],ymm2[0],ymm1[2],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 2880(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = mem[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 2992(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],ymm2[0],ymm1[2],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 3104(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = mem[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 3216(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],ymm2[0],ymm1[2],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 3328(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = mem[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 3440(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 1312(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm5 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm5 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm5[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1424(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm4[1],ymm0[0],ymm4[2],ymm0[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1536(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm3 = xmm3[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 1648(%rdi), %xmm4
 ; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],ymm2[0],ymm1[2],ymm2[2]
-; AVX1-ONLY-NEXT:    vmovdqa 3552(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm2 = mem[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm13, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm2
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm2 = mem[0],xmm14[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[1],ymm4[0],ymm1[2],ymm4[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1760(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm3 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vmovapd 352(%rdi), %ymm14
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm14[0,1,2],ymm1[3]
-; AVX1-ONLY-NEXT:    vmovapd 256(%rdi), %xmm13
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm2 = xmm13[0],xmm10[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 1872(%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm2[1],ymm3[0],ymm2[2],ymm3[2]
+; AVX1-ONLY-NEXT:    vmovdqa 1984(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm8[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 2096(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vmovapd 576(%rdi), %ymm2
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 480(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm1[0],xmm12[1]
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-ONLY-NEXT:    vmovdqa 2208(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 2320(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm15[1],ymm0[0],ymm15[2],ymm0[2]
+; AVX1-ONLY-NEXT:    vmovdqa 2432(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 2544(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm0[1],ymm15[0],ymm0[2],ymm15[2]
+; AVX1-ONLY-NEXT:    vmovdqa 2656(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 2768(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-ONLY-NEXT:    vmovdqa 2880(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 2992(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-ONLY-NEXT:    vmovdqa 3104(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 3216(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-ONLY-NEXT:    vmovdqa 3328(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 3440(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-ONLY-NEXT:    vmovdqa 3552(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = mem[0],xmm14[1]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vmovapd 800(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovapd 352(%rdi), %ymm1
 ; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 704(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm1[0],xmm11[1]
+; AVX1-ONLY-NEXT:    vmovapd 256(%rdi), %xmm14
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm14[0],xmm12[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 576(%rdi), %ymm11
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm11[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 480(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm12[0],xmm13[1]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vmovapd 1024(%rdi), %ymm10
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 928(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = xmm1[0],mem[1]
+; AVX1-ONLY-NEXT:    vmovapd 800(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 704(%rdi), %xmm13
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm13[0],xmm7[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm9, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 1024(%rdi), %ymm7
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm7[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 928(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm9[0],xmm10[1]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    vmovapd 1248(%rdi), %ymm1
 ; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 1152(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm1[0],xmm7[1]
+; AVX1-ONLY-NEXT:    vmovapd 1152(%rdi), %xmm10
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm10[0],xmm6[1]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vmovapd 1472(%rdi), %ymm7
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm7[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 1472(%rdi), %ymm6
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm6[0,1,2],ymm0[3]
 ; AVX1-ONLY-NEXT:    vmovapd 1376(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm1[0],xmm5[1]
+; AVX1-ONLY-NEXT:    vblendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = xmm1[0],mem[1]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
@@ -6422,29 +6465,34 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3]
 ; AVX1-ONLY-NEXT:    vmovapd 1600(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm1[0],xmm6[1]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vmovapd 1920(%rdi), %ymm5
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm5[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 1824(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm12[0],xmm4[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm1[0],xmm4[1]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vmovapd 2144(%rdi), %ymm11
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm11[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 2048(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovapd 1920(%rdi), %ymm4
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 1824(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm1[0],xmm9[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm1[0],xmm3[1]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vmovapd 2368(%rdi), %ymm4
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm4[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 2272(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm9[0],xmm3[1]
+; AVX1-ONLY-NEXT:    vmovaps 2144(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 2048(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = xmm1[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vmovapd 2368(%rdi), %ymm3
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 2272(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = xmm1[0],mem[1]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
@@ -6452,28 +6500,27 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm8[0,1,2],ymm0[3]
 ; AVX1-ONLY-NEXT:    vmovapd 2496(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} xmm1 = xmm1[0],xmm15[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vmovapd 2816(%rdi), %ymm15
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm15[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 2720(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vblendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm1 = xmm1[0],mem[1]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vmovaps 2816(%rdi), %ymm3
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 2720(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = xmm6[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    vmovaps 3040(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 2944(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, (%rsp) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = xmm1[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 3040(%rdi), %ymm5
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm5[0,1,2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovapd 2944(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovapd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd $2, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = xmm1[0],mem[1]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    vmovaps 3264(%rdi), %ymm1
 ; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -6487,11 +6534,12 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    vmovaps 3488(%rdi), %ymm1
 ; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps 3392(%rdi), %xmm15
-; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = xmm15[0,1],mem[2,3]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 3392(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vblendps $12, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm1 = xmm0[0,1],mem[2,3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm2[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 192(%rdi), %ymm1
 ; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -6501,20 +6549,20 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 416(%rdi), %ymm0
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm14[0],ymm0[0],ymm14[3],ymm0[2]
+; AVX1-ONLY-NEXT:    vmovapd 416(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 320(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm13[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovdqa %xmm1, (%rsp) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm14[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 544(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm0 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm12[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovapd 640(%rdi), %ymm1
 ; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[3],ymm1[2]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm11[0],ymm1[0],ymm11[3],ymm1[2]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 864(%rdi), %ymm1
@@ -6523,16 +6571,14 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 768(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm13[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 992(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm0 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm9[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovapd 1088(%rdi), %ymm1
 ; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm10[0],ymm1[0],ymm10[3],ymm1[2]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm7[0],ymm1[0],ymm7[3],ymm1[2]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 1312(%rdi), %ymm1
@@ -6541,15 +6587,14 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 1216(%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = xmm10[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 1440(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm0 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovapd 1536(%rdi), %ymm13
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm7[0],ymm13[0],ymm7[3],ymm13[2]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm6[0],ymm13[0],ymm6[3],ymm13[2]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 1760(%rdi), %ymm1
@@ -6562,22 +6607,25 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 1888(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm12[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm0 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovapd 1984(%rdi), %ymm10
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm5[0],ymm10[0],ymm5[3],ymm10[2]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm4[0],ymm10[0],ymm4[3],ymm10[2]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 2208(%rdi), %ymm12
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm11[0],ymm12[0],ymm11[3],ymm12[2]
+; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm12[0],ymm0[3],ymm12[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 2112(%rdi), %xmm11
 ; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm1 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 2336(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm9[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm0 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovapd 2432(%rdi), %ymm7
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm4[0],ymm7[0],ymm4[3],ymm7[2]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm3[0],ymm7[0],ymm3[3],ymm7[2]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 2656(%rdi), %ymm9
@@ -6588,16 +6636,16 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 2784(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm0 = xmm6[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm0 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovapd 2880(%rdi), %ymm4
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm3[0],ymm4[0],ymm3[3],ymm4[2]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm15[0],ymm4[0],ymm15[3],ymm4[2]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 3104(%rdi), %ymm6
-; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm6[0],ymm0[3],ymm6[2]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm5[0],ymm6[0],ymm5[3],ymm6[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 3008(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vpalignr $8, (%rsp), %xmm5, %xmm1 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm1 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm1 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -6613,7 +6661,8 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovupd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[3],ymm2[2]
 ; AVX1-ONLY-NEXT:    vmovdqa 3456(%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vpalignr {{.*#+}} xmm15 = xmm15[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
+; AVX1-ONLY-NEXT:    vpalignr $8, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm15 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm15 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -6626,7 +6675,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $63, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # ymm0 = mem[0,1,2,3,4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps (%rsp), %xmm15 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vblendps $3, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm15 = mem[0,1],xmm15[2,3]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1,2,3],ymm0[4,5,6,7]
@@ -6928,7 +6977,7 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 32(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, (%rax)
-; AVX1-ONLY-NEXT:    addq $4232, %rsp # imm = 0x1088
+; AVX1-ONLY-NEXT:    addq $4264, %rsp # imm = 0x10A8
 ; AVX1-ONLY-NEXT:    vzeroupper
 ; AVX1-ONLY-NEXT:    retq
 ;
@@ -7352,101 +7401,117 @@ define void @load_i64_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm12
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm12[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 288(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 416(%rdi), %xmm11
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 512(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 640(%rdi), %xmm8
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 736(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 864(%rdi), %xmm6
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 960(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 1088(%rdi), %xmm5
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 1184(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 1312(%rdi), %xmm4
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 1408(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 1536(%rdi), %xmm2
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 1632(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 1760(%rdi), %xmm3
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm1 = mem[8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 1856(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm0 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 1984(%rdi), %xmm1
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm7 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm7[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm7[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 2080(%rdi), %ymm0
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm7 = mem[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 2208(%rdi), %xmm0
 ; AVX2-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm9 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],ymm9[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm7 = ymm7[2,3],ymm9[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 2304(%rdi), %ymm7
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 2432(%rdi), %xmm7
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm10 = mem[8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm9 = ymm9[2,3],ymm10[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm9 = ymm9[2,3],ymm10[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 2528(%rdi), %ymm9
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm9 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 2656(%rdi), %xmm10
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm13 = mem[8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm9 = ymm9[2,3],ymm13[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm9 = ymm9[2,3],ymm13[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 2752(%rdi), %ymm9
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm13 = mem[8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 2880(%rdi), %xmm9
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm14 = mem[8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm14[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm14[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 2976(%rdi), %ymm13
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 3104(%rdi), %xmm0
 ; AVX2-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm14 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm14[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm14[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 3200(%rdi), %ymm13
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 3328(%rdi), %xmm0
 ; AVX2-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm14 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm14[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm14[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vmovdqa 3424(%rdi), %ymm13
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} ymm13 = mem[8,9,10,11,12,13,14,15],ymm13[0,1,2,3,4,5,6,7],mem[24,25,26,27,28,29,30,31],ymm13[16,17,18,19,20,21,22,23]
 ; AVX2-ONLY-NEXT:    vmovdqa 3552(%rdi), %xmm0
 ; AVX2-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX2-ONLY-NEXT:    vpalignr {{.*#+}} xmm14 = mem[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm14[0,1]
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX2-ONLY-NEXT:    vperm2i128 {{.*#+}} ymm13 = ymm13[2,3],ymm14[2,3]
 ; AVX2-ONLY-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
 ; AVX2-ONLY-NEXT:    vmovdqa 128(%rdi), %ymm14

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
index 91bab216c00a5..042c6d2cc02ba 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-4.ll
@@ -761,7 +761,8 @@ define void @load_i8_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm5
 ; AVX1-ONLY-NEXT:    vpshufb %xmm9, %xmm5, %xmm6
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm4[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm10
 ; AVX1-ONLY-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm4
 ; AVX1-ONLY-NEXT:    vmovdqa 32(%rdi), %xmm6
@@ -773,7 +774,7 @@ define void @load_i8_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpshufb %xmm9, %xmm1, %xmm9
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0,1,2,3],xmm8[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm8, %ymm8
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm10[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
 ; AVX1-ONLY-NEXT:    vpshufb %xmm9, %xmm0, %xmm10
 ; AVX1-ONLY-NEXT:    vpshufb %xmm9, %xmm2, %xmm11
@@ -783,6 +784,7 @@ define void @load_i8_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpshufb %xmm11, %xmm5, %xmm13
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm12[0,1,2,3],xmm10[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm0, %ymm10
 ; AVX1-ONLY-NEXT:    vpshufb %xmm9, %xmm7, %xmm12
 ; AVX1-ONLY-NEXT:    vpshufb %xmm9, %xmm6, %xmm9
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm12[0],xmm9[1],xmm12[1]
@@ -790,7 +792,7 @@ define void @load_i8_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpshufb %xmm11, %xmm1, %xmm11
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm11[0,1,2,3],xmm9[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
 ; AVX1-ONLY-NEXT:    vpshufb %xmm10, %xmm0, %xmm11
 ; AVX1-ONLY-NEXT:    vpshufb %xmm10, %xmm2, %xmm12
@@ -800,6 +802,7 @@ define void @load_i8_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpshufb %xmm12, %xmm5, %xmm14
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm11 = xmm13[0,1,2,3],xmm11[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm11
 ; AVX1-ONLY-NEXT:    vpshufb %xmm10, %xmm7, %xmm13
 ; AVX1-ONLY-NEXT:    vpshufb %xmm10, %xmm6, %xmm10
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1]
@@ -807,7 +810,7 @@ define void @load_i8_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpshufb %xmm12, %xmm1, %xmm12
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm12[0,1,2,3],xmm10[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm10, %ymm10
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
 ; AVX1-ONLY-NEXT:    vpshufb %xmm11, %xmm0, %xmm0
 ; AVX1-ONLY-NEXT:    vpshufb %xmm11, %xmm2, %xmm2
@@ -817,6 +820,7 @@ define void @load_i8_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm5, %xmm5
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vpshufb %xmm11, %xmm7, %xmm3
 ; AVX1-ONLY-NEXT:    vpshufb %xmm11, %xmm6, %xmm5
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
@@ -824,7 +828,7 @@ define void @load_i8_stride4_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovaps %ymm8, (%rsi)
 ; AVX1-ONLY-NEXT:    vmovaps %ymm9, (%rdx)
 ; AVX1-ONLY-NEXT:    vmovaps %ymm10, (%rcx)
@@ -1519,20 +1523,21 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vmovdqa 96(%rdi), %xmm3
 ; AVX1-ONLY-NEXT:    vpshufb %xmm1, %xmm3, %xmm2
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm3, %xmm8
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
-; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-ONLY-NEXT:    vmovdqa 80(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm3, %xmm5
-; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm3, %xmm7
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm3 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX1-ONLY-NEXT:    vmovdqa 80(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshufb %xmm3, %xmm0, %xmm5
+; AVX1-ONLY-NEXT:    vmovdqa 64(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vpshufb %xmm3, %xmm0, %xmm7
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm9 = xmm5[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0,1,2,3],xmm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm9
 ; AVX1-ONLY-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 32(%rdi), %xmm4
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 48(%rdi), %xmm5
@@ -1540,11 +1545,11 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpshufb %xmm1, %xmm5, %xmm10
 ; AVX1-ONLY-NEXT:    vpshufb %xmm1, %xmm4, %xmm11
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
-; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm3, %xmm11
-; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm0, %xmm12
+; AVX1-ONLY-NEXT:    vpshufb %xmm3, %xmm2, %xmm11
+; AVX1-ONLY-NEXT:    vpshufb %xmm3, %xmm0, %xmm12
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0,1,2,3],xmm10[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm9, %ymm10, %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm9[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa 240(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -1555,12 +1560,12 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm13 = xmm12[0],xmm11[0],xmm12[1],xmm11[1]
 ; AVX1-ONLY-NEXT:    vmovdqa 208(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm0, %xmm14
+; AVX1-ONLY-NEXT:    vpshufb %xmm3, %xmm0, %xmm14
 ; AVX1-ONLY-NEXT:    vmovdqa 192(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm0, %xmm15
+; AVX1-ONLY-NEXT:    vpshufb %xmm3, %xmm0, %xmm15
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
-; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm14[0,1,2,3],xmm13[4,5,6,7]
+; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm14[0,1,2,3],xmm13[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa 176(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufb %xmm1, %xmm0, %xmm15
@@ -1570,13 +1575,14 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1]
 ; AVX1-ONLY-NEXT:    vmovdqa 144(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX1-ONLY-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
 ; AVX1-ONLY-NEXT:    vmovdqa 128(%rdi), %xmm4
 ; AVX1-ONLY-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
-; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; AVX1-ONLY-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
+; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
 ; AVX1-ONLY-NEXT:    vpshufb %xmm0, %xmm6, %xmm1
@@ -1602,7 +1608,8 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm9, %xmm6
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpshufb %xmm0, %xmm13, %xmm1
@@ -1626,7 +1633,8 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
@@ -1645,7 +1653,8 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm9, %xmm6
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm3, %ymm1
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vpshufb %xmm0, %xmm13, %xmm1
 ; AVX1-ONLY-NEXT:    vpshufb %xmm0, %xmm14, %xmm3
@@ -1666,7 +1675,8 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpshufb %xmm2, %xmm11, %xmm2
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
@@ -1689,7 +1699,8 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpshufb %xmm3, %xmm7, %xmm7
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0,1,2,3],xmm5[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm5, %ymm2
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufb %xmm1, %xmm13, %xmm5
 ; AVX1-ONLY-NEXT:    vpshufb %xmm1, %xmm14, %xmm6
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
@@ -1704,7 +1715,8 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX1-ONLY-NEXT:    vpshufb %xmm3, %xmm11, %xmm3
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm1, %ymm1
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm3
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm3, 32(%rsi)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
@@ -1912,7 +1924,8 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,4,0,4,0,4,8,12]
 ; AVX512F-NEXT:    vpermt2d %ymm5, %ymm1, %ymm6
 ; AVX512F-NEXT:    vpmovdb %zmm2, %xmm5
-; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm5[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm8
 ; AVX512F-NEXT:    vmovdqa 96(%rdi), %ymm5
 ; AVX512F-NEXT:    vpshufb %ymm7, %ymm5, %ymm9
 ; AVX512F-NEXT:    vmovdqa 64(%rdi), %ymm6
@@ -1920,7 +1933,7 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpermt2d %ymm9, %ymm1, %ymm7
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm9
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3],ymm7[4,5,6,7]
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm8[0,1,2,3]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm8[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm8 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512F-NEXT:    vpshufb %ymm8, %ymm3, %ymm9
 ; AVX512F-NEXT:    vpshufb %ymm8, %ymm4, %ymm10
@@ -1928,13 +1941,14 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpsrld $8, %zmm2, %zmm9
 ; AVX512F-NEXT:    vpmovdb %zmm9, %xmm9
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm9
 ; AVX512F-NEXT:    vpshufb %ymm8, %ymm5, %ymm10
 ; AVX512F-NEXT:    vpshufb %ymm8, %ymm6, %ymm8
 ; AVX512F-NEXT:    vpermt2d %ymm10, %ymm1, %ymm8
 ; AVX512F-NEXT:    vpsrld $8, %zmm0, %zmm10
 ; AVX512F-NEXT:    vpmovdb %zmm10, %xmm10
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm8 = ymm10[0,1,2,3],ymm8[4,5,6,7]
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],zmm9[0,1,2,3]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],zmm9[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512F-NEXT:    vpshufb %ymm9, %ymm3, %ymm10
 ; AVX512F-NEXT:    vpshufb %ymm9, %ymm4, %ymm11
@@ -1942,13 +1956,14 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpsrld $16, %zmm2, %zmm10
 ; AVX512F-NEXT:    vpmovdb %zmm10, %xmm10
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm10
 ; AVX512F-NEXT:    vpshufb %ymm9, %ymm5, %ymm11
 ; AVX512F-NEXT:    vpshufb %ymm9, %ymm6, %ymm9
 ; AVX512F-NEXT:    vpermt2d %ymm11, %ymm1, %ymm9
 ; AVX512F-NEXT:    vpsrld $16, %zmm0, %zmm11
 ; AVX512F-NEXT:    vpmovdb %zmm11, %xmm11
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm9 = ymm11[0,1,2,3],ymm9[4,5,6,7]
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm10[0,1,2,3]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm10[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm10 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512F-NEXT:    vpshufb %ymm10, %ymm3, %ymm3
 ; AVX512F-NEXT:    vpshufb %ymm10, %ymm4, %ymm4
@@ -1956,13 +1971,14 @@ define void @load_i8_stride4_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vpsrld $24, %zmm2, %zmm2
 ; AVX512F-NEXT:    vpmovdb %zmm2, %xmm2
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm4[4,5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
 ; AVX512F-NEXT:    vpshufb %ymm10, %ymm5, %ymm3
 ; AVX512F-NEXT:    vpshufb %ymm10, %ymm6, %ymm4
 ; AVX512F-NEXT:    vpermt2d %ymm3, %ymm1, %ymm4
 ; AVX512F-NEXT:    vpsrld $24, %zmm0, %zmm0
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm4[4,5,6,7]
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[0,1,2,3]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqa64 %zmm7, (%rsi)
 ; AVX512F-NEXT:    vmovdqa64 %zmm8, (%rdx)
 ; AVX512F-NEXT:    vmovdqa64 %zmm9, (%rcx)

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
index 81c37145ef8e6..ab9da42de3ca3 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
@@ -1858,224 +1858,117 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
-; AVX512BW-SLOW-LABEL: load_i8_stride5_vf32:
-; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %ymm3
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX512BW-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm0
-; AVX512BW-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm1
-; AVX512BW-SLOW-NEXT:    movw $21140, %ax # imm = 0x5294
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm1, %ymm0, %ymm4 {%k1}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    movl $1108344832, %eax # imm = 0x42100000
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm5, %ymm4 {%k2}
-; AVX512BW-SLOW-NEXT:    movw $19026, %ax # imm = 0x4A52
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k2
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm5 {%k2}
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,xmm6[4,9,14],zero,zero,zero,xmm6[2,7,12,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,5,10,15],zero,zero,zero,xmm5[3,8,13],zero,zero,zero,xmm5[u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    movl $67100672, %eax # imm = 0x3FFE000
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k3
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 {%k3} = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,1,6,11,16,21,26,31,20,25,30,19,24,29,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vmovdqa 144(%rdi), %xmm6
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm6[1,6,11]
-; AVX512BW-SLOW-NEXT:    vmovdqa 128(%rdi), %xmm7
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,u,u,u,u,u,u,2,7,12],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7],ymm5[8,9,10,11,12],ymm4[13,14,15]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    movw $10570, %ax # imm = 0x294A
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k4
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm0, %ymm1, %ymm5 {%k4}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm5[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    movl $-2078212096, %eax # imm = 0x84210000
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k5
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm8, %ymm5 {%k5}
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm8 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm8[1,6,11],zero,zero,zero,zero,xmm8[4,9,14],zero,zero,zero,xmm8[u,u,u]
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm8[0,5,10,15],zero,zero,zero,xmm8[3,8,13,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm9, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 {%k3} = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,2,7,12,17,22,27,16,21,26,31,20,25,30,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm6[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm6[2,7,12]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm7[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %xmm5, %xmm9, %xmm5
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm8[0,1,2,3,4],ymm5[5,6,7],ymm8[8,9,10,11,12],ymm5[13,14,15]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm0, %ymm1, %ymm8 {%k2}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm8[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    movl $138543104, %eax # imm = 0x8420000
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k5
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm9, %ymm8 {%k5}
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm9 {%k4}
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm10
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[1,6,11],zero,zero,zero,zero,xmm10[4,9,14,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[2,7,12],zero,zero,zero,xmm9[0,5,10,15],zero,zero,zero,xmm9[u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm10, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 {%k3} = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,3,8,13,18,23,28,17,22,27,16,21,26,31,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm6[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm6[3,8,13]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm7[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %xmm8, %xmm10, %xmm8
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0,1,2,3,4],ymm8[5,6,7],ymm9[8,9,10,11,12],ymm8[13,14,15]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm0, %ymm1, %ymm9 {%k1}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm9[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    movl $277086208, %eax # imm = 0x10840000
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k3
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm10, %ymm9 {%k3}
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm10 {%k2}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm10[3,8,13],zero,zero,zero,xmm10[1,6,11],zero,zero,zero,zero,xmm10[u,u,u]
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[2,7,12],zero,zero,zero,xmm10[0,5,10,15,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm11, %xmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    movl $33546240, %eax # imm = 0x1FFE000
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k3
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 {%k3} = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,xmm6[4,9,14]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %xmm6, %xmm7, %xmm6
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512BW-SLOW-NEXT:    movl $-33554432, %eax # imm = 0xFE000000
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k3
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm6, %ymm10 {%k3}
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm3, %ymm2 {%k1}
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero,xmm2[u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k2}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    movl $554172416, %eax # imm = 0x21080000
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm1
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [21474836480,21474836480,21474836480,21474836480]
-; AVX512BW-SLOW-NEXT:    vpermd %ymm1, %ymm2, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k3}
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm4, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm5, (%rdx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm8, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm10, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm0, (%r9)
-; AVX512BW-SLOW-NEXT:    vzeroupper
-; AVX512BW-SLOW-NEXT:    retq
-;
-; AVX512BW-FAST-LABEL: load_i8_stride5_vf32:
-; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %ymm4
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX512BW-FAST-NEXT:    vmovdqa 64(%rdi), %ymm0
-; AVX512BW-FAST-NEXT:    vmovdqa 96(%rdi), %ymm1
-; AVX512BW-FAST-NEXT:    movw $21140, %ax # imm = 0x5294
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm1, %ymm0, %ymm5 {%k1}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm5[2,3,0,1]
-; AVX512BW-FAST-NEXT:    movl $1108344832, %eax # imm = 0x42100000
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k2
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm3, %ymm5 {%k2}
-; AVX512BW-FAST-NEXT:    movw $19026, %ax # imm = 0x4A52
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k2
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm2, %ymm4, %ymm3 {%k2}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm6
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,xmm6[4,9,14],zero,zero,zero,xmm6[2,7,12,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[0,5,10,15],zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm6, %xmm3, %xmm3
-; AVX512BW-FAST-NEXT:    movl $67100672, %eax # imm = 0x3FFE000
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k3
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm3 {%k3} = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,1,6,11,16,21,26,31,20,25,30,19,24,29,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vmovdqa 144(%rdi), %xmm6
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm6[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm6[1,6,11]
-; AVX512BW-FAST-NEXT:    vmovdqa 128(%rdi), %xmm7
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,u,u,u,u,u,u,2,7,12],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpor %xmm5, %xmm8, %xmm5
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,1,2,3,4,5,6,7,8,9,10,11,12,21,22,23]
-; AVX512BW-FAST-NEXT:    vpermt2w %ymm5, %ymm8, %ymm3
-; AVX512BW-FAST-NEXT:    movw $10570, %ax # imm = 0x294A
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k4
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm0, %ymm1, %ymm9 {%k4}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm9[2,3,0,1]
-; AVX512BW-FAST-NEXT:    movl $-2078212096, %eax # imm = 0x84210000
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k5
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm5, %ymm9 {%k5}
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm2, %ymm4, %ymm5 {%k1}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm5[1,6,11],zero,zero,zero,zero,xmm5[4,9,14],zero,zero,zero,xmm5[u,u,u]
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm5
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[0,5,10,15],zero,zero,zero,xmm5[3,8,13,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm5, %xmm10, %xmm5
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm5 {%k3} = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,2,7,12,17,22,27,16,21,26,31,20,25,30,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm6[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm6[2,7,12]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm7[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpor %xmm9, %xmm10, %xmm9
-; AVX512BW-FAST-NEXT:    vpermt2w %ymm9, %ymm8, %ymm5
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm0, %ymm1, %ymm9 {%k2}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm9[2,3,0,1]
-; AVX512BW-FAST-NEXT:    movl $138543104, %eax # imm = 0x8420000
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k5
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm10, %ymm9 {%k5}
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm4, %ymm2, %ymm10 {%k4}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm11
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm11[1,6,11],zero,zero,zero,zero,xmm11[4,9,14,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[2,7,12],zero,zero,zero,xmm10[0,5,10,15],zero,zero,zero,xmm10[u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm11, %xmm10, %xmm10
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm10 {%k3} = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,3,8,13,18,23,28,17,22,27,16,21,26,31,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm6[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm6[3,8,13]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm7[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpor %xmm9, %xmm11, %xmm9
-; AVX512BW-FAST-NEXT:    vpermt2w %ymm9, %ymm8, %ymm10
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm0, %ymm1, %ymm8 {%k1}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm8[2,3,0,1]
-; AVX512BW-FAST-NEXT:    movl $277086208, %eax # imm = 0x10840000
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k3
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm9, %ymm8 {%k3}
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm4, %ymm2, %ymm9 {%k2}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm9[3,8,13],zero,zero,zero,xmm9[1,6,11],zero,zero,zero,zero,xmm9[u,u,u]
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm9
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[2,7,12],zero,zero,zero,xmm9[0,5,10,15,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm11, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    movl $33546240, %eax # imm = 0x1FFE000
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k3
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm9 {%k3} = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,xmm6[4,9,14]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpor %xmm6, %xmm7, %xmm6
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512BW-FAST-NEXT:    movl $-33554432, %eax # imm = 0xFE000000
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k3
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm6, %ymm9 {%k3}
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm4, %ymm2 {%k1}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[3,8,13],zero,zero,zero,xmm4[1,6,11,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero,xmm2[u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm4, %xmm2, %xmm2
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k2}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX512BW-FAST-NEXT:    movl $554172416, %eax # imm = 0x21080000
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k1}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqa 128(%rdi), %ymm1
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [21474836480,21474836480,21474836480,21474836480]
-; AVX512BW-FAST-NEXT:    vpermd %ymm1, %ymm2, %ymm1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k3}
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm3, (%rsi)
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm5, (%rdx)
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm10, (%rcx)
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm9, (%r8)
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm0, (%r9)
-; AVX512BW-FAST-NEXT:    vzeroupper
-; AVX512BW-FAST-NEXT:    retq
+; AVX512BW-LABEL: load_i8_stride5_vf32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm3
+; AVX512BW-NEXT:    vmovdqa 32(%rdi), %ymm2
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm0
+; AVX512BW-NEXT:    vmovdqa 96(%rdi), %ymm1
+; AVX512BW-NEXT:    movw $21140, %ax # imm = 0x5294
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vpblendmw %ymm1, %ymm0, %ymm4 {%k1}
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
+; AVX512BW-NEXT:    movl $1108344832, %eax # imm = 0x42100000
+; AVX512BW-NEXT:    kmovd %eax, %k2
+; AVX512BW-NEXT:    vmovdqu8 %ymm5, %ymm4 {%k2}
+; AVX512BW-NEXT:    movw $19026, %ax # imm = 0x4A52
+; AVX512BW-NEXT:    kmovd %eax, %k2
+; AVX512BW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm5 {%k2}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,xmm6[4,9,14],zero,zero,zero,xmm6[2,7,12,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,5,10,15],zero,zero,zero,xmm5[3,8,13],zero,zero,zero,xmm5[u,u,u]
+; AVX512BW-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX512BW-NEXT:    movl $67100672, %eax # imm = 0x3FFE000
+; AVX512BW-NEXT:    kmovd %eax, %k3
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm5 {%k3} = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,1,6,11,16,21,26,31,20,25,30,19,24,29,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vmovdqa 144(%rdi), %xmm6
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm6[1,6,11]
+; AVX512BW-NEXT:    vmovdqa 128(%rdi), %xmm7
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,u,u,u,u,u,u,2,7,12],zero,zero,zero
+; AVX512BW-NEXT:    vpor %xmm4, %xmm8, %xmm4
+; AVX512BW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5,6,7],ymm5[8,9,10,11,12],ymm4[13,14,15]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX512BW-NEXT:    movw $10570, %ax # imm = 0x294A
+; AVX512BW-NEXT:    kmovd %eax, %k4
+; AVX512BW-NEXT:    vpblendmw %ymm0, %ymm1, %ymm5 {%k4}
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm8 = ymm5[2,3,0,1]
+; AVX512BW-NEXT:    movl $-2078212096, %eax # imm = 0x84210000
+; AVX512BW-NEXT:    kmovd %eax, %k5
+; AVX512BW-NEXT:    vmovdqu8 %ymm8, %ymm5 {%k5}
+; AVX512BW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm8 {%k1}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm8[1,6,11],zero,zero,zero,zero,xmm8[4,9,14],zero,zero,zero,xmm8[u,u,u]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm8, %xmm8
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm8[0,5,10,15],zero,zero,zero,xmm8[3,8,13,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm9, %xmm8, %xmm8
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm8 {%k3} = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,2,7,12,17,22,27,16,21,26,31,20,25,30,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm6[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm6[2,7,12]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm7[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
+; AVX512BW-NEXT:    vpor %xmm5, %xmm9, %xmm5
+; AVX512BW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm8[0,1,2,3,4],ymm5[5,6,7],ymm8[8,9,10,11,12],ymm5[13,14,15]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-NEXT:    vpblendmw %ymm0, %ymm1, %ymm8 {%k2}
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm9 = ymm8[2,3,0,1]
+; AVX512BW-NEXT:    movl $138543104, %eax # imm = 0x8420000
+; AVX512BW-NEXT:    kmovd %eax, %k5
+; AVX512BW-NEXT:    vmovdqu8 %ymm9, %ymm8 {%k5}
+; AVX512BW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm9 {%k4}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm9, %xmm10
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[1,6,11],zero,zero,zero,zero,xmm10[4,9,14,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[2,7,12],zero,zero,zero,xmm9[0,5,10,15],zero,zero,zero,xmm9[u,u,u]
+; AVX512BW-NEXT:    vpor %xmm10, %xmm9, %xmm9
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm9 {%k3} = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,3,8,13,18,23,28,17,22,27,16,21,26,31,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm6[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm6[3,8,13]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm7[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
+; AVX512BW-NEXT:    vpor %xmm8, %xmm10, %xmm8
+; AVX512BW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0,1,2,3,4],ymm8[5,6,7],ymm9[8,9,10,11,12],ymm8[13,14,15]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vpblendmw %ymm0, %ymm1, %ymm9 {%k1}
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm10 = ymm9[2,3,0,1]
+; AVX512BW-NEXT:    movl $277086208, %eax # imm = 0x10840000
+; AVX512BW-NEXT:    kmovd %eax, %k3
+; AVX512BW-NEXT:    vmovdqu8 %ymm10, %ymm9 {%k3}
+; AVX512BW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm10 {%k2}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm10[3,8,13],zero,zero,zero,xmm10[1,6,11],zero,zero,zero,zero,xmm10[u,u,u]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm10, %xmm10
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[2,7,12],zero,zero,zero,xmm10[0,5,10,15,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm11, %xmm10, %xmm10
+; AVX512BW-NEXT:    movl $33546240, %eax # imm = 0x1FFE000
+; AVX512BW-NEXT:    kmovd %eax, %k3
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm10 {%k3} = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,xmm6[4,9,14]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
+; AVX512BW-NEXT:    vpor %xmm6, %xmm7, %xmm6
+; AVX512BW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512BW-NEXT:    movl $-33554432, %eax # imm = 0xFE000000
+; AVX512BW-NEXT:    kmovd %eax, %k3
+; AVX512BW-NEXT:    vmovdqu8 %ymm6, %ymm10 {%k3}
+; AVX512BW-NEXT:    vmovdqu16 %ymm3, %ymm2 {%k1}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero,xmm2[u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k2}
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
+; AVX512BW-NEXT:    movl $554172416, %eax # imm = 0x21080000
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k1}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm1
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [21474836480,21474836480,21474836480,21474836480]
+; AVX512BW-NEXT:    vpermd %ymm1, %ymm2, %ymm1
+; AVX512BW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k3}
+; AVX512BW-NEXT:    vmovdqa %ymm4, (%rsi)
+; AVX512BW-NEXT:    vmovdqa %ymm5, (%rdx)
+; AVX512BW-NEXT:    vmovdqa %ymm8, (%rcx)
+; AVX512BW-NEXT:    vmovdqa %ymm10, (%r8)
+; AVX512BW-NEXT:    vmovdqa %ymm0, (%r9)
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
   %wide.vec = load <160 x i8>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <160 x i8> %wide.vec, <160 x i8> poison, <32 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35, i32 40, i32 45, i32 50, i32 55, i32 60, i32 65, i32 70, i32 75, i32 80, i32 85, i32 90, i32 95, i32 100, i32 105, i32 110, i32 115, i32 120, i32 125, i32 130, i32 135, i32 140, i32 145, i32 150, i32 155>
   %strided.vec1 = shufflevector <160 x i8> %wide.vec, <160 x i8> poison, <32 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36, i32 41, i32 46, i32 51, i32 56, i32 61, i32 66, i32 71, i32 76, i32 81, i32 86, i32 91, i32 96, i32 101, i32 106, i32 111, i32 116, i32 121, i32 126, i32 131, i32 136, i32 141, i32 146, i32 151, i32 156>
@@ -3878,850 +3771,428 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    vzeroupper
 ; AVX2-ONLY-NEXT:    retq
 ;
-; AVX512F-SLOW-LABEL: load_i8_stride5_vf64:
-; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535]
-; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm23
-; AVX512F-SLOW-NEXT:    vmovdqa64 32(%rdi), %ymm24
-; AVX512F-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm21
-; AVX512F-SLOW-NEXT:    vmovdqa64 96(%rdi), %ymm22
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm5
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm24, %ymm23, %ymm5
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,xmm6[4,9,14],zero,zero,zero,xmm6[2,7,12,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,5,10,15],zero,zero,zero,xmm5[3,8,13],zero,zero,zero,xmm5[u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm6, %xmm5, %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm5, %ymm7
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm22, %ymm21, %ymm7
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm7[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm8
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[1,6,11,16,21,26,31,20,25,30,19,24,29,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm6, %ymm9
-; AVX512F-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm6
-; AVX512F-SLOW-NEXT:    vmovdqa 224(%rdi), %ymm7
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm10
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm6, %ymm7, %ymm10
-; AVX512F-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm8
-; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm10
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,ymm10[3,8,13,2,7,12,1,6,11,16,21,26,31,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vmovdqa 176(%rdi), %xmm10
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm10[u,u,u,u,u,u,u,u,4,9,14,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vmovdqa 160(%rdi), %xmm11
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm11[u,u,u,u,u,u,u,u,0,5,10,15,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm1 = xmm14[2],xmm13[2],xmm14[3],xmm13[3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $186, %ymm12, %ymm16, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa 144(%rdi), %xmm13
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm13[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm13[1,6,11]
-; AVX512F-SLOW-NEXT:    vmovdqa 128(%rdi), %xmm14
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[u,u,u,u,u,u,u,u,u,u,2,7,12],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpor %xmm12, %xmm15, %xmm12
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm12, %zmm17
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm9, %zmm20, %zmm17
-; AVX512F-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm15
-; AVX512F-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm12
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm5, %ymm9
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm15, %ymm12, %ymm9
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u],zero,zero,zero,xmm2[3,8,13],zero,zero,zero,xmm2[1,6,11]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,4,9,14],zero,zero,zero,xmm9[2,7,12],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpor %xmm2, %xmm9, %xmm2
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm17, %zmm19
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm12, %ymm15, %ymm1
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,zero,zero,xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,0,5,10,15],zero,zero,zero,xmm1[3,8,13],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm5, %ymm2
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm6, %ymm7, %ymm2
-; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,ymm2[4,9,14,3,8,13,2,7,12,17,22,27,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm11[1,6,11],zero,zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm10[0,5,10,15,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm3, %xmm9, %xmm3
-; AVX512F-SLOW-NEXT:    vpternlogq $186, %ymm2, %ymm16, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm1, %ymm16, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm5, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm24, %ymm23, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[1,6,11],zero,zero,zero,zero,xmm1[4,9,14],zero,zero,zero,xmm1[u,u,u]
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[0,5,10,15],zero,zero,zero,xmm1[3,8,13,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm9, %ymm2
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm22, %ymm2
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm4
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[2,7,12,17,22,27,16,21,26,31,20,25,30,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm1, %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm13[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm13[2,7,12]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm14[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpor %xmm1, %xmm4, %xmm1
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm20, %zmm1
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm17
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm5, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm12, %ymm15, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[u,u,u,1,6,11],zero,zero,zero,zero,xmm1[4,9,14],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u],zero,zero,zero,xmm1[0,5,10,15],zero,zero,zero,xmm1[3,8,13]
-; AVX512F-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm2
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm7, %ymm6, %ymm2
-; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,0,5,10,15,4,9,14,3,8,13,18,23,28,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm10[1,6,11,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm11[2,7,12],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm2[3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm1, %ymm16, %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm9, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm23, %ymm24, %ymm1
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[1,6,11],zero,zero,zero,zero,xmm3[4,9,14,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[2,7,12],zero,zero,zero,xmm1[0,5,10,15],zero,zero,zero,xmm1[u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm3, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm3
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm22, %ymm3
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[3,8,13,18,23,28,17,22,27,16,21,26,31,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm1, %ymm3
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm13[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm13[3,8,13]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm14[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpor %xmm1, %xmm4, %xmm1
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm20, %zmm1
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm18
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm9, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm15, %ymm12, %ymm1
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,zero,xmm2[1,6,11],zero,zero,zero,zero,xmm2[4,9,14]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,2,7,12],zero,zero,zero,xmm1[0,5,10,15],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm5, %ymm2
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm7, %ymm6, %ymm2
-; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,1,6,11,0,5,10,15,4,9,14,19,24,29,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm10[2,7,12,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm11[3,8,13],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm2[3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm1, %ymm16, %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm23, %ymm24, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[3,8,13],zero,zero,zero,xmm1[1,6,11],zero,zero,zero,zero,xmm1[u,u,u]
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[2,7,12],zero,zero,zero,xmm1[0,5,10,15,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm3, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm5, %ymm3
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm22, %ymm3
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm13[u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,xmm13[4,9,14]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm14[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpor %xmm1, %xmm4, %xmm1
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm4, %zmm1
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm15, %ymm0, %ymm12
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm12[u,u,u,3,8,13],zero,zero,zero,xmm12[1,6,11],zero,zero,zero,zero
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,zero,xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15]
-; AVX512F-SLOW-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm6, %ymm7, %ymm9
-; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm9
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm9[u,u,u,u,u,u,2,7,12,1,6,11,0,5,10,15,20,25,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm10[3,8,13,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm11[4,9,14],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm6, %xmm7, %xmm6
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm3[3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm2, %ymm16, %ymm3
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm23, %ymm24, %ymm5
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm2[3,8,13],zero,zero,zero,xmm2[1,6,11,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,9,14],zero,zero,zero,xmm5[2,7,12],zero,zero,zero,xmm5[u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm2, %xmm5, %xmm2
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm22, %ymm21, %ymm0
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm5
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm5 = [21474836480,21474836480,21474836480,21474836480]
-; AVX512F-SLOW-NEXT:    vpermd %ymm2, %ymm5, %ymm2
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm4, %zmm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm19, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm17, (%rdx)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm18, (%rcx)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm1, (%r8)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, (%r9)
-; AVX512F-SLOW-NEXT:    vzeroupper
-; AVX512F-SLOW-NEXT:    retq
-;
-; AVX512F-FAST-LABEL: load_i8_stride5_vf64:
-; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535]
-; AVX512F-FAST-NEXT:    vmovdqa64 (%rdi), %ymm23
-; AVX512F-FAST-NEXT:    vmovdqa64 32(%rdi), %ymm24
-; AVX512F-FAST-NEXT:    vmovdqa64 64(%rdi), %ymm21
-; AVX512F-FAST-NEXT:    vmovdqa64 96(%rdi), %ymm22
-; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm5
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm24, %ymm23, %ymm5
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,xmm6[4,9,14],zero,zero,zero,xmm6[2,7,12,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,5,10,15],zero,zero,zero,xmm5[3,8,13],zero,zero,zero,xmm5[u,u,u]
-; AVX512F-FAST-NEXT:    vpor %xmm6, %xmm5, %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm5, %ymm7
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm22, %ymm21, %ymm7
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm7[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm8
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[1,6,11,16,21,26,31,20,25,30,19,24,29,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm18, %ymm6, %ymm11
-; AVX512F-FAST-NEXT:    vmovdqa 192(%rdi), %ymm6
-; AVX512F-FAST-NEXT:    vmovdqa 224(%rdi), %ymm7
-; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm9
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm6, %ymm7, %ymm9
-; AVX512F-FAST-NEXT:    vmovdqa 208(%rdi), %xmm8
-; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm9
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,ymm9[3,8,13,2,7,12,1,6,11,16,21,26,31,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vmovdqa 176(%rdi), %xmm9
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm9[u,u,u,u,u,u,u,u,4,9,14,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vmovdqa 160(%rdi), %xmm10
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm10[u,u,u,u,u,u,u,u,0,5,10,15,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm15 = xmm14[2],xmm13[2],xmm14[3],xmm13[3]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $186, %ymm12, %ymm16, %ymm15
-; AVX512F-FAST-NEXT:    vmovdqa 144(%rdi), %xmm13
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm13[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm13[1,6,11]
-; AVX512F-FAST-NEXT:    vmovdqa 128(%rdi), %xmm14
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm14[u,u,u,u,u,u,u,u,u,u,2,7,12],zero,zero,zero
-; AVX512F-FAST-NEXT:    vpor %xmm1, %xmm12, %xmm1
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm1, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm11, %zmm20, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa 256(%rdi), %ymm15
-; AVX512F-FAST-NEXT:    vmovdqa 288(%rdi), %ymm12
-; AVX512F-FAST-NEXT:    vmovdqa %ymm5, %ymm11
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm15, %ymm12, %ymm11
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u],zero,zero,zero,xmm2[3,8,13],zero,zero,zero,xmm2[1,6,11]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,4,9,14],zero,zero,zero,xmm11[2,7,12],zero,zero,zero
-; AVX512F-FAST-NEXT:    vpor %xmm2, %xmm11, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [8,9,10,11,12,17,18,19]
-; AVX512F-FAST-NEXT:    vpermi2d %zmm2, %zmm1, %zmm11
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm1, %zmm19
-; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm12, %ymm15, %ymm1
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,zero,zero,xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,0,5,10,15],zero,zero,zero,xmm1[3,8,13],zero,zero,zero
-; AVX512F-FAST-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa %ymm5, %ymm2
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm6, %ymm7, %ymm2
-; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,ymm2[4,9,14,3,8,13,2,7,12,17,22,27,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm10[1,6,11],zero,zero,zero,zero,xmm10[u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm9[0,5,10,15,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %xmm3, %xmm11, %xmm3
-; AVX512F-FAST-NEXT:    vpternlogq $186, %ymm2, %ymm16, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm1, %ymm16, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa %ymm5, %ymm1
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm24, %ymm23, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[1,6,11],zero,zero,zero,zero,xmm1[4,9,14],zero,zero,zero,xmm1[u,u,u]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[0,5,10,15],zero,zero,zero,xmm1[3,8,13,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm11, %ymm2
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm21, %ymm22, %ymm2
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm4
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[2,7,12,17,22,27,16,21,26,31,20,25,30,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm18, %ymm1, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm13[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm13[2,7,12]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm14[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
-; AVX512F-FAST-NEXT:    vpor %xmm1, %xmm4, %xmm1
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm20, %zmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm17
-; AVX512F-FAST-NEXT:    vmovdqa %ymm5, %ymm1
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm12, %ymm15, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[u,u,u,1,6,11],zero,zero,zero,zero,xmm1[4,9,14],zero,zero,zero
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u],zero,zero,zero,xmm1[0,5,10,15],zero,zero,zero,xmm1[3,8,13]
-; AVX512F-FAST-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm2
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm7, %ymm6, %ymm2
-; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,0,5,10,15,4,9,14,3,8,13,18,23,28,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm9[1,6,11,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm10[2,7,12],zero,zero,zero,xmm10[u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm2[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm1, %ymm16, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa %ymm11, %ymm1
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm23, %ymm24, %ymm1
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[1,6,11],zero,zero,zero,zero,xmm3[4,9,14,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[2,7,12],zero,zero,zero,xmm1[0,5,10,15],zero,zero,zero,xmm1[u,u,u]
-; AVX512F-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm3
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm21, %ymm22, %ymm3
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[3,8,13,18,23,28,17,22,27,16,21,26,31,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm18, %ymm1, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm13[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm13[3,8,13]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm14[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
-; AVX512F-FAST-NEXT:    vpor %xmm1, %xmm4, %xmm1
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm20, %zmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm18
-; AVX512F-FAST-NEXT:    vmovdqa %ymm11, %ymm1
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm15, %ymm12, %ymm1
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,zero,xmm2[1,6,11],zero,zero,zero,zero,xmm2[4,9,14]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,2,7,12],zero,zero,zero,xmm1[0,5,10,15],zero,zero,zero
-; AVX512F-FAST-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa %ymm5, %ymm2
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm7, %ymm6, %ymm2
-; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,1,6,11,0,5,10,15,4,9,14,19,24,29,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm9[2,7,12,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm10[3,8,13],zero,zero,zero,xmm10[u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm2[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm1, %ymm16, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm23, %ymm24, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[3,8,13],zero,zero,zero,xmm1[1,6,11],zero,zero,zero,zero,xmm1[u,u,u]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[2,7,12],zero,zero,zero,xmm1[0,5,10,15,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa %ymm5, %ymm3
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm21, %ymm22, %ymm3
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm13[u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,xmm13[4,9,14]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm14[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
-; AVX512F-FAST-NEXT:    vpor %xmm1, %xmm4, %xmm1
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm4, %zmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm15, %ymm0, %ymm12
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm12[u,u,u,3,8,13],zero,zero,zero,xmm12[1,6,11],zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm3
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,zero,xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15]
-; AVX512F-FAST-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm6, %ymm7, %ymm11
-; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm11
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm11[u,u,u,u,u,u,2,7,12,1,6,11,0,5,10,15,20,25,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm9[3,8,13,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm10[4,9,14],zero,zero,zero,xmm10[u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %xmm6, %xmm7, %xmm6
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm3[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm2, %ymm16, %ymm3
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm23, %ymm24, %ymm5
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm2[3,8,13],zero,zero,zero,xmm2[1,6,11,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,9,14],zero,zero,zero,xmm5[2,7,12],zero,zero,zero,xmm5[u,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %xmm2, %xmm5, %xmm2
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm22, %ymm21, %ymm0
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm5
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa 128(%rdi), %ymm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm5 = [21474836480,21474836480,21474836480,21474836480]
-; AVX512F-FAST-NEXT:    vpermd %ymm2, %ymm5, %ymm2
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm4, %zmm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm19, (%rsi)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm17, (%rdx)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm18, (%rcx)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, (%r8)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, (%r9)
-; AVX512F-FAST-NEXT:    vzeroupper
-; AVX512F-FAST-NEXT:    retq
-;
-; AVX512BW-SLOW-LABEL: load_i8_stride5_vf64:
-; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %ymm3
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX512BW-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm0
-; AVX512BW-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm1
-; AVX512BW-SLOW-NEXT:    movw $21140, %ax # imm = 0x5294
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k2
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm1, %ymm0, %ymm4 {%k2}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    movl $1108344832, %eax # imm = 0x42100000
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm5, %ymm4 {%k1}
-; AVX512BW-SLOW-NEXT:    movw $19026, %ax # imm = 0x4A52
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm5 {%k1}
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,xmm6[4,9,14],zero,zero,zero,xmm6[2,7,12,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,5,10,15],zero,zero,zero,xmm5[3,8,13],zero,zero,zero,xmm5[u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm6, %xmm5, %xmm10
-; AVX512BW-SLOW-NEXT:    movl $67100672, %eax # imm = 0x3FFE000
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k5
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 {%k5} = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,1,6,11,16,21,26,31,20,25,30,19,24,29,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vmovdqa 192(%rdi), %ymm5
-; AVX512BW-SLOW-NEXT:    vmovdqa 224(%rdi), %ymm4
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm5, %ymm4, %ymm6 {%k1}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    movl $4228, %eax # imm = 0x1084
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k3
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm7, %ymm6 {%k3}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm6[u,u,u,u,u,u,u,3,8,13,2,7,12,1,6,11,16,21,26,31,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vmovdqa 176(%rdi), %xmm6
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm6[u,u,u,u,u,u,u,u,4,9,14,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vmovdqa 160(%rdi), %xmm7
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm7[u,u,u,u,u,u,u,u,0,5,10,15,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm11[2],xmm9[2],xmm11[3],xmm9[3]
-; AVX512BW-SLOW-NEXT:    movl $127, %eax
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k4
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm9, %ymm8 {%k4}
-; AVX512BW-SLOW-NEXT:    vmovdqa 144(%rdi), %xmm12
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm12[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm12[1,6,11]
-; AVX512BW-SLOW-NEXT:    vmovdqa 128(%rdi), %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm13[u,u,u,u,u,u,u,u,u,u,2,7,12],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %xmm9, %xmm11, %xmm9
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm8
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm8, %zmm10 {%k5}
-; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm10, %ymm11
-; AVX512BW-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm9
-; AVX512BW-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm8
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm9, %ymm8, %ymm14 {%k2}
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm15
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm15[u,u,u,u],zero,zero,zero,xmm15[3,8,13],zero,zero,zero,xmm15[1,6,11]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,4,9,14],zero,zero,zero,xmm14[2,7,12],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %xmm15, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4],ymm14[5,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm18
-; AVX512BW-SLOW-NEXT:    movw $10570, %ax # imm = 0x294A
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k3
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm0, %ymm1, %ymm11 {%k3}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm11[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    movl $-2078212096, %eax # imm = 0x84210000
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k6
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm14, %ymm11 {%k6}
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm14 {%k2}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[1,6,11],zero,zero,zero,zero,xmm14[4,9,14],zero,zero,zero,xmm14[u,u,u]
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = zero,zero,zero,xmm14[0,5,10,15],zero,zero,zero,xmm14[3,8,13,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm15, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 {%k5} = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,2,7,12,17,22,27,16,21,26,31,20,25,30,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm5, %ymm4, %ymm11 {%k2}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm11[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    movl $8456, %eax # imm = 0x2108
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k6
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm15, %ymm11 {%k6}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,4,9,14,3,8,13,2,7,12,17,22,27,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm7[1,6,11],zero,zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = zero,zero,zero,xmm6[0,5,10,15,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vporq %xmm15, %xmm16, %xmm15
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm15, %ymm11 {%k4}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm12[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm12[2,7,12]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm13[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vporq %xmm15, %xmm16, %xmm15
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm15, %zmm11
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm11, %zmm14 {%k5}
-; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm14, %ymm11
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm8, %ymm9, %ymm15 {%k1}
-; AVX512BW-SLOW-NEXT:    vextracti32x4 $1, %ymm15, %xmm16
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm16[u,u,u],zero,zero,zero,zero,xmm16[4,9,14],zero,zero,zero,xmm16[2,7,12]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm15[u,u,u,0,5,10,15],zero,zero,zero,xmm15[3,8,13],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vporq %xmm16, %xmm15, %xmm15
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
-; AVX512BW-SLOW-NEXT:    movl $-524288, %eax # imm = 0xFFF80000
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k4
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm15, %ymm11 {%k4}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm14, %zmm19
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm0, %ymm1, %ymm14 {%k1}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm14[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    movl $138543104, %eax # imm = 0x8420000
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k6
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm15, %ymm14 {%k6}
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm15 {%k3}
-; AVX512BW-SLOW-NEXT:    vextracti32x4 $1, %ymm15, %xmm16
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = zero,zero,zero,xmm16[1,6,11],zero,zero,zero,zero,xmm16[4,9,14,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm15[2,7,12],zero,zero,zero,xmm15[0,5,10,15],zero,zero,zero,xmm15[u,u,u]
-; AVX512BW-SLOW-NEXT:    vporq %xmm16, %xmm15, %xmm15
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 {%k5} = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,3,8,13,18,23,28,17,22,27,16,21,26,31,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm4, %ymm5, %ymm14 {%k1}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm14[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    movl $16912, %eax # imm = 0x4210
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k6
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm16, %ymm14 {%k6}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,0,5,10,15,4,9,14,3,8,13,18,23,28,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = zero,zero,zero,xmm6[1,6,11,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = xmm7[2,7,12],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vporq %xmm16, %xmm17, %xmm10
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2],xmm14[3,4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm12[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm12[3,8,13]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm13[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vporq %xmm14, %xmm16, %xmm14
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm14, %zmm10
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm10, %zmm15 {%k5}
-; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm15, %ymm10
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm8, %ymm9, %ymm14 {%k2}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm14[u,u,u,1,6,11],zero,zero,zero,zero,xmm14[4,9,14],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u],zero,zero,zero,xmm14[0,5,10,15],zero,zero,zero,xmm14[3,8,13]
-; AVX512BW-SLOW-NEXT:    vporq %xmm16, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm14, %ymm10 {%k4}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm15, %zmm14
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm0, %ymm1, %ymm10 {%k2}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm10[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    movl $277086208, %eax # imm = 0x10840000
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k5
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm15, %ymm10 {%k5}
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm15 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm15[3,8,13],zero,zero,zero,xmm15[1,6,11],zero,zero,zero,zero,xmm15[u,u,u]
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm15
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm15[2,7,12],zero,zero,zero,xmm15[0,5,10,15,u,u,u]
-; AVX512BW-SLOW-NEXT:    vporq %xmm16, %xmm15, %xmm15
-; AVX512BW-SLOW-NEXT:    movl $33546240, %eax # imm = 0x1FFE000
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k5
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 {%k5} = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm4, %ymm5, %ymm10 {%k2}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm10[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    movl $33825, %eax # imm = 0x8421
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k5
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm16, %ymm10 {%k5}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,1,6,11,0,5,10,15,4,9,14,19,24,29,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = zero,zero,zero,xmm6[2,7,12,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = xmm7[3,8,13],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vporq %xmm16, %xmm17, %xmm11
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2],xmm10[3,4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm12[u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,xmm12[4,9,14]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm13[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %xmm11, %xmm12, %xmm11
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512BW-SLOW-NEXT:    movl $33554431, %eax # imm = 0x1FFFFFF
-; AVX512BW-SLOW-NEXT:    kmovq %rax, %k5
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm15, %zmm10 {%k5}
-; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm10, %ymm11
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm9, %ymm8, %ymm12 {%k3}
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,u],zero,zero,zero,xmm13[1,6,11],zero,zero,zero,zero,xmm13[4,9,14]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,2,7,12],zero,zero,zero,xmm12[0,5,10,15],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %xmm13, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm12, %ymm11 {%k4}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm10
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm3, %ymm2 {%k2}
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero,xmm2[u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k1}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    movl $554172416, %eax # imm = 0x21080000
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k2}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm5, %ymm4 {%k3}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm4[2,3,0,1]
-; AVX512BW-SLOW-NEXT:    movl $2114, %eax # imm = 0x842
-; AVX512BW-SLOW-NEXT:    kmovd %eax, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm1, %ymm4 {%k2}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,2,7,12,1,6,11,0,5,10,15,20,25,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm6[3,8,13,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm7[4,9,14],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[3,4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm2
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [21474836480,21474836480,21474836480,21474836480]
-; AVX512BW-SLOW-NEXT:    vpermd %ymm2, %ymm3, %ymm2
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k5}
-; AVX512BW-SLOW-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm9, %ymm8 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm8[u,u,u,3,8,13],zero,zero,zero,xmm8[1,6,11],zero,zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm3
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,zero,xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15]
-; AVX512BW-SLOW-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm2, %ymm0 {%k4}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm18, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm19, (%rdx)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm14, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm10, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%r9)
-; AVX512BW-SLOW-NEXT:    vzeroupper
-; AVX512BW-SLOW-NEXT:    retq
+; AVX512F-LABEL: load_i8_stride5_vf64:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535]
+; AVX512F-NEXT:    vmovdqa64 (%rdi), %ymm23
+; AVX512F-NEXT:    vmovdqa64 32(%rdi), %ymm24
+; AVX512F-NEXT:    vmovdqa64 64(%rdi), %ymm21
+; AVX512F-NEXT:    vmovdqa64 96(%rdi), %ymm22
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm5
+; AVX512F-NEXT:    vpternlogq $202, %ymm24, %ymm23, %ymm5
+; AVX512F-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,xmm6[4,9,14],zero,zero,zero,xmm6[2,7,12,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,5,10,15],zero,zero,zero,xmm5[3,8,13],zero,zero,zero,xmm5[u,u,u]
+; AVX512F-NEXT:    vpor %xmm6, %xmm5, %xmm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535]
+; AVX512F-NEXT:    vmovdqa %ymm5, %ymm7
+; AVX512F-NEXT:    vpternlogq $202, %ymm22, %ymm21, %ymm7
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm8 = ymm7[2,3,0,1]
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm8
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[1,6,11,16,21,26,31,20,25,30,19,24,29],zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpternlogq $248, %ymm18, %ymm6, %ymm9
+; AVX512F-NEXT:    vmovdqa 192(%rdi), %ymm6
+; AVX512F-NEXT:    vmovdqa 224(%rdi), %ymm7
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm10
+; AVX512F-NEXT:    vpternlogq $202, %ymm6, %ymm7, %ymm10
+; AVX512F-NEXT:    vmovdqa 208(%rdi), %xmm8
+; AVX512F-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm10
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,ymm10[3,8,13,2,7,12,1,6,11,16,21,26,31,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vmovdqa 176(%rdi), %xmm10
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm13 = xmm10[u,u,u,u,u,u,u,u,4,9,14,u,u,u,u,u]
+; AVX512F-NEXT:    vmovdqa 160(%rdi), %xmm11
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm14 = xmm11[u,u,u,u,u,u,u,u,0,5,10,15,u,u,u,u]
+; AVX512F-NEXT:    vpunpckhdq {{.*#+}} xmm1 = xmm14[2],xmm13[2],xmm14[3],xmm13[3]
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpternlogq $186, %ymm12, %ymm16, %ymm1
+; AVX512F-NEXT:    vmovdqa 144(%rdi), %xmm13
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm12 = xmm13[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm13[1,6,11]
+; AVX512F-NEXT:    vmovdqa 128(%rdi), %xmm14
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[u,u,u,u,u,u,u,u,u,u,2,7,12],zero,zero,zero
+; AVX512F-NEXT:    vpor %xmm12, %xmm15, %xmm12
+; AVX512F-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm12, %zmm17
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535]
+; AVX512F-NEXT:    vpternlogq $184, %zmm9, %zmm20, %zmm17
+; AVX512F-NEXT:    vmovdqa 256(%rdi), %ymm15
+; AVX512F-NEXT:    vmovdqa 288(%rdi), %ymm12
+; AVX512F-NEXT:    vmovdqa %ymm5, %ymm9
+; AVX512F-NEXT:    vpternlogq $202, %ymm15, %ymm12, %ymm9
+; AVX512F-NEXT:    vextracti128 $1, %ymm9, %xmm2
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u],zero,zero,zero,xmm2[3,8,13],zero,zero,zero,xmm2[1,6,11]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,4,9,14],zero,zero,zero,xmm9[2,7,12],zero,zero,zero
+; AVX512F-NEXT:    vpor %xmm2, %xmm9, %xmm2
+; AVX512F-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm2[5,6,7]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm17, %zmm19
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm1
+; AVX512F-NEXT:    vpternlogq $202, %ymm12, %ymm15, %ymm1
+; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,zero,zero,xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,0,5,10,15],zero,zero,zero,xmm1[3,8,13],zero,zero,zero
+; AVX512F-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vmovdqa %ymm5, %ymm2
+; AVX512F-NEXT:    vpternlogq $202, %ymm6, %ymm7, %ymm2
+; AVX512F-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm2
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,ymm2[4,9,14,3,8,13,2,7,12,17,22,27,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm9 = xmm11[1,6,11],zero,zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm10[0,5,10,15,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm3, %xmm9, %xmm3
+; AVX512F-NEXT:    vpternlogq $186, %ymm2, %ymm16, %ymm3
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-NEXT:    vpternlogq $226, %ymm1, %ymm16, %ymm3
+; AVX512F-NEXT:    vmovdqa %ymm5, %ymm1
+; AVX512F-NEXT:    vpternlogq $202, %ymm24, %ymm23, %ymm1
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[1,6,11],zero,zero,zero,zero,xmm1[4,9,14],zero,zero,zero,xmm1[u,u,u]
+; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[0,5,10,15],zero,zero,zero,xmm1[3,8,13,u,u,u]
+; AVX512F-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,0,65535,0,65535,65535,0,65535,0,65535,65535,0,65535,0,65535,65535]
+; AVX512F-NEXT:    vmovdqa %ymm9, %ymm2
+; AVX512F-NEXT:    vpternlogq $202, %ymm21, %ymm22, %ymm2
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm4
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[2,7,12,17,22,27,16,21,26,31,20,25,30],zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpternlogq $248, %ymm18, %ymm1, %ymm2
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm13[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm13[2,7,12]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm4 = xmm14[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
+; AVX512F-NEXT:    vpor %xmm1, %xmm4, %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vpternlogq $184, %zmm2, %zmm20, %zmm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm17
+; AVX512F-NEXT:    vmovdqa %ymm5, %ymm1
+; AVX512F-NEXT:    vpternlogq $202, %ymm12, %ymm15, %ymm1
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[u,u,u,1,6,11],zero,zero,zero,zero,xmm1[4,9,14],zero,zero,zero
+; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u],zero,zero,zero,xmm1[0,5,10,15],zero,zero,zero,xmm1[3,8,13]
+; AVX512F-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm2
+; AVX512F-NEXT:    vpternlogq $202, %ymm7, %ymm6, %ymm2
+; AVX512F-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm2
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,0,5,10,15,4,9,14,3,8,13,18,23,28,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm10[1,6,11,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm4 = xmm11[2,7,12],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm2[3,4,5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vpternlogq $226, %ymm1, %ymm16, %ymm2
+; AVX512F-NEXT:    vmovdqa %ymm9, %ymm1
+; AVX512F-NEXT:    vpternlogq $202, %ymm23, %ymm24, %ymm1
+; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[1,6,11],zero,zero,zero,zero,xmm3[4,9,14,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[2,7,12],zero,zero,zero,xmm1[0,5,10,15],zero,zero,zero,xmm1[u,u,u]
+; AVX512F-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm3
+; AVX512F-NEXT:    vpternlogq $202, %ymm21, %ymm22, %ymm3
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[3,8,13,18,23,28,17,22,27,16,21,26,31],zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vpternlogq $248, %ymm18, %ymm1, %ymm3
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm13[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm13[3,8,13]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm4 = xmm14[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
+; AVX512F-NEXT:    vpor %xmm1, %xmm4, %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vpternlogq $184, %zmm3, %zmm20, %zmm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm18
+; AVX512F-NEXT:    vmovdqa %ymm9, %ymm1
+; AVX512F-NEXT:    vpternlogq $202, %ymm15, %ymm12, %ymm1
+; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,zero,xmm2[1,6,11],zero,zero,zero,zero,xmm2[4,9,14]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,2,7,12],zero,zero,zero,xmm1[0,5,10,15],zero,zero,zero
+; AVX512F-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vmovdqa %ymm5, %ymm2
+; AVX512F-NEXT:    vpternlogq $202, %ymm7, %ymm6, %ymm2
+; AVX512F-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm2
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,1,6,11,0,5,10,15,4,9,14,19,24,29,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm10[2,7,12,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm4 = xmm11[3,8,13],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm2[3,4,5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-NEXT:    vpternlogq $226, %ymm1, %ymm16, %ymm2
+; AVX512F-NEXT:    vmovdqa %ymm0, %ymm1
+; AVX512F-NEXT:    vpternlogq $202, %ymm23, %ymm24, %ymm1
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[3,8,13],zero,zero,zero,xmm1[1,6,11],zero,zero,zero,zero,xmm1[u,u,u]
+; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[2,7,12],zero,zero,zero,xmm1[0,5,10,15,u,u,u]
+; AVX512F-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vmovdqa %ymm5, %ymm3
+; AVX512F-NEXT:    vpternlogq $202, %ymm21, %ymm22, %ymm3
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm1 = xmm13[u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,xmm13[4,9,14]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm4 = xmm14[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
+; AVX512F-NEXT:    vpor %xmm1, %xmm4, %xmm1
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpternlogq $226, %zmm3, %zmm4, %zmm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-NEXT:    vpternlogq $226, %ymm15, %ymm0, %ymm12
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = xmm12[u,u,u,3,8,13],zero,zero,zero,xmm12[1,6,11],zero,zero,zero,zero
+; AVX512F-NEXT:    vextracti128 $1, %ymm12, %xmm3
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,zero,xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15]
+; AVX512F-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX512F-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpternlogq $202, %ymm6, %ymm7, %ymm9
+; AVX512F-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm9
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm3 = ymm9[u,u,u,u,u,u,2,7,12,1,6,11,0,5,10,15,20,25,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm10[3,8,13,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm7 = xmm11[4,9,14],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm6, %xmm7, %xmm6
+; AVX512F-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm3[3,4,5,6,7]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-NEXT:    vpternlogq $226, %ymm2, %ymm16, %ymm3
+; AVX512F-NEXT:    vpternlogq $202, %ymm23, %ymm24, %ymm5
+; AVX512F-NEXT:    vextracti128 $1, %ymm5, %xmm2
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm2[3,8,13],zero,zero,zero,xmm2[1,6,11,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,9,14],zero,zero,zero,xmm5[2,7,12],zero,zero,zero,xmm5[u,u,u,u]
+; AVX512F-NEXT:    vpor %xmm2, %xmm5, %xmm2
+; AVX512F-NEXT:    vpternlogq $202, %ymm22, %ymm21, %ymm0
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
+; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm5
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
+; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm2
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm5 = [21474836480,21474836480,21474836480,21474836480]
+; AVX512F-NEXT:    vpermd %ymm2, %ymm5, %ymm2
+; AVX512F-NEXT:    vpternlogq $226, %zmm0, %zmm4, %zmm2
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm0
+; AVX512F-NEXT:    vmovdqa64 %zmm19, (%rsi)
+; AVX512F-NEXT:    vmovdqa64 %zmm17, (%rdx)
+; AVX512F-NEXT:    vmovdqa64 %zmm18, (%rcx)
+; AVX512F-NEXT:    vmovdqa64 %zmm1, (%r8)
+; AVX512F-NEXT:    vmovdqa64 %zmm0, (%r9)
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
 ;
-; AVX512BW-FAST-LABEL: load_i8_stride5_vf64:
-; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %ymm3
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX512BW-FAST-NEXT:    vmovdqa 64(%rdi), %ymm0
-; AVX512BW-FAST-NEXT:    vmovdqa 96(%rdi), %ymm1
-; AVX512BW-FAST-NEXT:    movw $21140, %ax # imm = 0x5294
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k2
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm1, %ymm0, %ymm4 {%k2}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
-; AVX512BW-FAST-NEXT:    movl $1108344832, %eax # imm = 0x42100000
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm5, %ymm4 {%k1}
-; AVX512BW-FAST-NEXT:    movw $19026, %ax # imm = 0x4A52
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm2, %ymm3, %ymm5 {%k1}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,xmm6[4,9,14],zero,zero,zero,xmm6[2,7,12,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,5,10,15],zero,zero,zero,xmm5[3,8,13],zero,zero,zero,xmm5[u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm6, %xmm5, %xmm10
-; AVX512BW-FAST-NEXT:    movl $67100672, %eax # imm = 0x3FFE000
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k5
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm10 {%k5} = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,1,6,11,16,21,26,31,20,25,30,19,24,29,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vmovdqa 192(%rdi), %ymm5
-; AVX512BW-FAST-NEXT:    vmovdqa 224(%rdi), %ymm4
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm5, %ymm4, %ymm6 {%k1}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
-; AVX512BW-FAST-NEXT:    movl $4228, %eax # imm = 0x1084
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k3
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm7, %ymm6 {%k3}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm6[u,u,u,u,u,u,u,3,8,13,2,7,12,1,6,11,16,21,26,31,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vmovdqa 176(%rdi), %xmm6
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm6[u,u,u,u,u,u,u,u,4,9,14,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vmovdqa 160(%rdi), %xmm7
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm7[u,u,u,u,u,u,u,u,0,5,10,15,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm11[2],xmm9[2],xmm11[3],xmm9[3]
-; AVX512BW-FAST-NEXT:    movl $127, %eax
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k4
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm9, %ymm8 {%k4}
-; AVX512BW-FAST-NEXT:    vmovdqa 144(%rdi), %xmm11
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm11[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm11[1,6,11]
-; AVX512BW-FAST-NEXT:    vmovdqa 128(%rdi), %xmm12
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm12[u,u,u,u,u,u,u,u,u,u,2,7,12],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpor %xmm9, %xmm13, %xmm9
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm8
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm8, %zmm10 {%k5}
-; AVX512BW-FAST-NEXT:    vmovdqa 256(%rdi), %ymm9
-; AVX512BW-FAST-NEXT:    vmovdqa 288(%rdi), %ymm8
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm9, %ymm8, %ymm13 {%k2}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm14
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u],zero,zero,zero,xmm14[3,8,13],zero,zero,zero,xmm14[1,6,11]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,u,u,4,9,14],zero,zero,zero,xmm13[2,7,12],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpor %xmm14, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [8,9,10,11,12,17,18,19]
-; AVX512BW-FAST-NEXT:    vpermi2d %zmm13, %zmm10, %zmm14
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm10, %zmm18
-; AVX512BW-FAST-NEXT:    movw $10570, %ax # imm = 0x294A
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k3
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm0, %ymm1, %ymm13 {%k3}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm13[2,3,0,1]
-; AVX512BW-FAST-NEXT:    movl $-2078212096, %eax # imm = 0x84210000
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k6
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm14, %ymm13 {%k6}
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm2, %ymm3, %ymm14 {%k2}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[1,6,11],zero,zero,zero,zero,xmm14[4,9,14],zero,zero,zero,xmm14[u,u,u]
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = zero,zero,zero,xmm14[0,5,10,15],zero,zero,zero,xmm14[3,8,13,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm15, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm14 {%k5} = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,2,7,12,17,22,27,16,21,26,31,20,25,30,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm5, %ymm4, %ymm13 {%k2}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm13[2,3,0,1]
-; AVX512BW-FAST-NEXT:    movl $8456, %eax # imm = 0x2108
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k6
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm15, %ymm13 {%k6}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,4,9,14,3,8,13,2,7,12,17,22,27,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm7[1,6,11],zero,zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = zero,zero,zero,xmm6[0,5,10,15,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vporq %xmm15, %xmm16, %xmm15
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm15, %ymm13 {%k4}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm11[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm11[2,7,12]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = xmm12[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vporq %xmm15, %xmm16, %xmm15
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm15, %zmm13
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm13, %zmm14 {%k5}
-; AVX512BW-FAST-NEXT:    vextracti64x4 $1, %zmm14, %ymm13
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm8, %ymm9, %ymm15 {%k1}
-; AVX512BW-FAST-NEXT:    vextracti32x4 $1, %ymm15, %xmm16
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = xmm16[u,u,u],zero,zero,zero,zero,xmm16[4,9,14],zero,zero,zero,xmm16[2,7,12]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm15[u,u,u,0,5,10,15],zero,zero,zero,xmm15[3,8,13],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vporq %xmm16, %xmm15, %xmm15
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
-; AVX512BW-FAST-NEXT:    movl $-524288, %eax # imm = 0xFFF80000
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k4
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm15, %ymm13 {%k4}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm14, %zmm19
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm0, %ymm1, %ymm14 {%k1}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm14[2,3,0,1]
-; AVX512BW-FAST-NEXT:    movl $138543104, %eax # imm = 0x8420000
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k6
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm15, %ymm14 {%k6}
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm3, %ymm2, %ymm15 {%k3}
-; AVX512BW-FAST-NEXT:    vextracti32x4 $1, %ymm15, %xmm16
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = zero,zero,zero,xmm16[1,6,11],zero,zero,zero,zero,xmm16[4,9,14,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm15[2,7,12],zero,zero,zero,xmm15[0,5,10,15],zero,zero,zero,xmm15[u,u,u]
-; AVX512BW-FAST-NEXT:    vporq %xmm16, %xmm15, %xmm15
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm15 {%k5} = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,3,8,13,18,23,28,17,22,27,16,21,26,31,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm4, %ymm5, %ymm14 {%k1}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm16 = ymm14[2,3,0,1]
-; AVX512BW-FAST-NEXT:    movl $16912, %eax # imm = 0x4210
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k6
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm16, %ymm14 {%k6}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,0,5,10,15,4,9,14,3,8,13,18,23,28,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = zero,zero,zero,xmm6[1,6,11,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm7[2,7,12],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vporq %xmm16, %xmm17, %xmm10
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2],xmm14[3,4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm14[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm11[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm11[3,8,13]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = xmm12[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vporq %xmm14, %xmm16, %xmm14
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm14, %zmm10
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm10, %zmm15 {%k5}
-; AVX512BW-FAST-NEXT:    vextracti64x4 $1, %zmm15, %ymm10
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm8, %ymm9, %ymm14 {%k2}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = xmm14[u,u,u,1,6,11],zero,zero,zero,zero,xmm14[4,9,14],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u],zero,zero,zero,xmm14[0,5,10,15],zero,zero,zero,xmm14[3,8,13]
-; AVX512BW-FAST-NEXT:    vporq %xmm16, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm14, %ymm10 {%k4}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm15, %zmm14
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm0, %ymm1, %ymm10 {%k2}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm10[2,3,0,1]
-; AVX512BW-FAST-NEXT:    movl $277086208, %eax # imm = 0x10840000
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k5
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm15, %ymm10 {%k5}
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm3, %ymm2, %ymm15 {%k1}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = xmm15[3,8,13],zero,zero,zero,xmm15[1,6,11],zero,zero,zero,zero,xmm15[u,u,u]
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm15
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm15[2,7,12],zero,zero,zero,xmm15[0,5,10,15,u,u,u]
-; AVX512BW-FAST-NEXT:    vporq %xmm16, %xmm15, %xmm15
-; AVX512BW-FAST-NEXT:    movl $33546240, %eax # imm = 0x1FFE000
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k5
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm15 {%k5} = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm4, %ymm5, %ymm10 {%k2}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm16 = ymm10[2,3,0,1]
-; AVX512BW-FAST-NEXT:    movl $33825, %eax # imm = 0x8421
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k5
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm16, %ymm10 {%k5}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,1,6,11,0,5,10,15,4,9,14,19,24,29,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = zero,zero,zero,xmm6[2,7,12,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm7[3,8,13],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vporq %xmm16, %xmm17, %xmm13
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0,1,2],xmm10[3,4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm13[0,1,2,3],ymm10[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,xmm11[4,9,14]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpor %xmm11, %xmm12, %xmm11
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512BW-FAST-NEXT:    movl $33554431, %eax # imm = 0x1FFFFFF
-; AVX512BW-FAST-NEXT:    kmovq %rax, %k5
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm15, %zmm10 {%k5}
-; AVX512BW-FAST-NEXT:    vextracti64x4 $1, %zmm10, %ymm11
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm9, %ymm8, %ymm12 {%k3}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm13
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,u],zero,zero,zero,xmm13[1,6,11],zero,zero,zero,zero,xmm13[4,9,14]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,2,7,12],zero,zero,zero,xmm12[0,5,10,15],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpor %xmm13, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm12, %ymm11 {%k4}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm10
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm3, %ymm2 {%k2}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero,xmm2[u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k1}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX512BW-FAST-NEXT:    movl $554172416, %eax # imm = 0x21080000
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k2
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k2}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm5, %ymm4 {%k3}
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm4[2,3,0,1]
-; AVX512BW-FAST-NEXT:    movl $2114, %eax # imm = 0x842
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k2
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm1, %ymm4 {%k2}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,2,7,12,1,6,11,0,5,10,15,20,25,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm6[3,8,13,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm7[4,9,14],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[3,4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqa 128(%rdi), %ymm2
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [21474836480,21474836480,21474836480,21474836480]
-; AVX512BW-FAST-NEXT:    vpermd %ymm2, %ymm3, %ymm2
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k5}
-; AVX512BW-FAST-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm9, %ymm8 {%k1}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm8[u,u,u,3,8,13],zero,zero,zero,xmm8[1,6,11],zero,zero,zero,zero
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm3
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,zero,xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15]
-; AVX512BW-FAST-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm2, %ymm0 {%k4}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm18, (%rsi)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm19, (%rdx)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm14, (%rcx)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm10, (%r8)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%r9)
-; AVX512BW-FAST-NEXT:    vzeroupper
-; AVX512BW-FAST-NEXT:    retq
+; AVX512BW-LABEL: load_i8_stride5_vf64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm3
+; AVX512BW-NEXT:    vmovdqa 32(%rdi), %ymm2
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm0
+; AVX512BW-NEXT:    vmovdqa 96(%rdi), %ymm1
+; AVX512BW-NEXT:    movw $21140, %ax # imm = 0x5294
+; AVX512BW-NEXT:    kmovd %eax, %k2
+; AVX512BW-NEXT:    vpblendmw %ymm1, %ymm0, %ymm4 {%k2}
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
+; AVX512BW-NEXT:    movl $1108344832, %eax # imm = 0x42100000
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vmovdqu8 %ymm5, %ymm4 {%k1}
+; AVX512BW-NEXT:    movw $19026, %ax # imm = 0x4A52
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm5 {%k1}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm5, %xmm6
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,xmm6[4,9,14],zero,zero,zero,xmm6[2,7,12,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,5,10,15],zero,zero,zero,xmm5[3,8,13],zero,zero,zero,xmm5[u,u,u]
+; AVX512BW-NEXT:    vpor %xmm6, %xmm5, %xmm10
+; AVX512BW-NEXT:    movl $67100672, %eax # imm = 0x3FFE000
+; AVX512BW-NEXT:    kmovd %eax, %k5
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm10 {%k5} = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,1,6,11,16,21,26,31,20,25,30,19,24,29,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vmovdqa 192(%rdi), %ymm5
+; AVX512BW-NEXT:    vmovdqa 224(%rdi), %ymm4
+; AVX512BW-NEXT:    vpblendmw %ymm5, %ymm4, %ymm6 {%k1}
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
+; AVX512BW-NEXT:    movl $4228, %eax # imm = 0x1084
+; AVX512BW-NEXT:    kmovd %eax, %k3
+; AVX512BW-NEXT:    vmovdqu8 %ymm7, %ymm6 {%k3}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm6[u,u,u,u,u,u,u,3,8,13,2,7,12,1,6,11,16,21,26,31,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vmovdqa 176(%rdi), %xmm6
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm6[u,u,u,u,u,u,u,u,4,9,14,u,u,u,u,u]
+; AVX512BW-NEXT:    vmovdqa 160(%rdi), %xmm7
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm7[u,u,u,u,u,u,u,u,0,5,10,15,u,u,u,u]
+; AVX512BW-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm11[2],xmm9[2],xmm11[3],xmm9[3]
+; AVX512BW-NEXT:    movl $127, %eax
+; AVX512BW-NEXT:    kmovd %eax, %k4
+; AVX512BW-NEXT:    vmovdqu8 %ymm9, %ymm8 {%k4}
+; AVX512BW-NEXT:    vmovdqa 144(%rdi), %xmm12
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm12[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm12[1,6,11]
+; AVX512BW-NEXT:    vmovdqa 128(%rdi), %xmm13
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm13[u,u,u,u,u,u,u,u,u,u,2,7,12],zero,zero,zero
+; AVX512BW-NEXT:    vpor %xmm9, %xmm11, %xmm9
+; AVX512BW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm8
+; AVX512BW-NEXT:    vmovdqu16 %zmm8, %zmm10 {%k5}
+; AVX512BW-NEXT:    vextracti64x4 $1, %zmm10, %ymm11
+; AVX512BW-NEXT:    vmovdqa 256(%rdi), %ymm9
+; AVX512BW-NEXT:    vmovdqa 288(%rdi), %ymm8
+; AVX512BW-NEXT:    vpblendmw %ymm9, %ymm8, %ymm14 {%k2}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm14, %xmm15
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm15[u,u,u,u],zero,zero,zero,xmm15[3,8,13],zero,zero,zero,xmm15[1,6,11]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,4,9,14],zero,zero,zero,xmm14[2,7,12],zero,zero,zero
+; AVX512BW-NEXT:    vpor %xmm15, %xmm14, %xmm14
+; AVX512BW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4],ymm14[5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm18
+; AVX512BW-NEXT:    movw $10570, %ax # imm = 0x294A
+; AVX512BW-NEXT:    kmovd %eax, %k3
+; AVX512BW-NEXT:    vpblendmw %ymm0, %ymm1, %ymm11 {%k3}
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm14 = ymm11[2,3,0,1]
+; AVX512BW-NEXT:    movl $-2078212096, %eax # imm = 0x84210000
+; AVX512BW-NEXT:    kmovd %eax, %k6
+; AVX512BW-NEXT:    vmovdqu8 %ymm14, %ymm11 {%k6}
+; AVX512BW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm14 {%k2}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[1,6,11],zero,zero,zero,zero,xmm14[4,9,14],zero,zero,zero,xmm14[u,u,u]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm14, %xmm14
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm14 = zero,zero,zero,xmm14[0,5,10,15],zero,zero,zero,xmm14[3,8,13,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm15, %xmm14, %xmm14
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm14 {%k5} = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,2,7,12,17,22,27,16,21,26,31,20,25,30,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpblendmw %ymm5, %ymm4, %ymm11 {%k2}
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm15 = ymm11[2,3,0,1]
+; AVX512BW-NEXT:    movl $8456, %eax # imm = 0x2108
+; AVX512BW-NEXT:    kmovd %eax, %k6
+; AVX512BW-NEXT:    vmovdqu8 %ymm15, %ymm11 {%k6}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,4,9,14,3,8,13,2,7,12,17,22,27,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm7[1,6,11],zero,zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm16 = zero,zero,zero,xmm6[0,5,10,15,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vporq %xmm15, %xmm16, %xmm15
+; AVX512BW-NEXT:    vmovdqu8 %ymm15, %ymm11 {%k4}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm12[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm12[2,7,12]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm13[u,u,u,u,u,u,u,u,u,u,3,8,13],zero,zero,zero
+; AVX512BW-NEXT:    vporq %xmm15, %xmm16, %xmm15
+; AVX512BW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm11, %zmm15, %zmm11
+; AVX512BW-NEXT:    vmovdqu16 %zmm11, %zmm14 {%k5}
+; AVX512BW-NEXT:    vextracti64x4 $1, %zmm14, %ymm11
+; AVX512BW-NEXT:    vpblendmw %ymm8, %ymm9, %ymm15 {%k1}
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm15, %xmm16
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm16[u,u,u],zero,zero,zero,zero,xmm16[4,9,14],zero,zero,zero,xmm16[2,7,12]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm15[u,u,u,0,5,10,15],zero,zero,zero,xmm15[3,8,13],zero,zero,zero
+; AVX512BW-NEXT:    vporq %xmm16, %xmm15, %xmm15
+; AVX512BW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX512BW-NEXT:    movl $-524288, %eax # imm = 0xFFF80000
+; AVX512BW-NEXT:    kmovd %eax, %k4
+; AVX512BW-NEXT:    vmovdqu8 %ymm15, %ymm11 {%k4}
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm11, %zmm14, %zmm19
+; AVX512BW-NEXT:    vpblendmw %ymm0, %ymm1, %ymm14 {%k1}
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm15 = ymm14[2,3,0,1]
+; AVX512BW-NEXT:    movl $138543104, %eax # imm = 0x8420000
+; AVX512BW-NEXT:    kmovd %eax, %k6
+; AVX512BW-NEXT:    vmovdqu8 %ymm15, %ymm14 {%k6}
+; AVX512BW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm15 {%k3}
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm15, %xmm16
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm16 = zero,zero,zero,xmm16[1,6,11],zero,zero,zero,zero,xmm16[4,9,14,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm15[2,7,12],zero,zero,zero,xmm15[0,5,10,15],zero,zero,zero,xmm15[u,u,u]
+; AVX512BW-NEXT:    vporq %xmm16, %xmm15, %xmm15
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm15 {%k5} = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,3,8,13,18,23,28,17,22,27,16,21,26,31,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpblendmw %ymm4, %ymm5, %ymm14 {%k1}
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm16 = ymm14[2,3,0,1]
+; AVX512BW-NEXT:    movl $16912, %eax # imm = 0x4210
+; AVX512BW-NEXT:    kmovd %eax, %k6
+; AVX512BW-NEXT:    vmovdqu8 %ymm16, %ymm14 {%k6}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,0,5,10,15,4,9,14,3,8,13,18,23,28,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm16 = zero,zero,zero,xmm6[1,6,11,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm17 = xmm7[2,7,12],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vporq %xmm16, %xmm17, %xmm10
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2],xmm14[3,4,5,6,7]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm14[4,5,6,7]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm12[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm12[3,8,13]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm13[u,u,u,u,u,u,u,u,u,u,4,9,14],zero,zero,zero
+; AVX512BW-NEXT:    vporq %xmm14, %xmm16, %xmm14
+; AVX512BW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm10, %zmm14, %zmm10
+; AVX512BW-NEXT:    vmovdqu16 %zmm10, %zmm15 {%k5}
+; AVX512BW-NEXT:    vextracti64x4 $1, %zmm15, %ymm10
+; AVX512BW-NEXT:    vpblendmw %ymm8, %ymm9, %ymm14 {%k2}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm14[u,u,u,1,6,11],zero,zero,zero,zero,xmm14[4,9,14],zero,zero,zero
+; AVX512BW-NEXT:    vextracti128 $1, %ymm14, %xmm14
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u],zero,zero,zero,xmm14[0,5,10,15],zero,zero,zero,xmm14[3,8,13]
+; AVX512BW-NEXT:    vporq %xmm16, %xmm14, %xmm14
+; AVX512BW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512BW-NEXT:    vmovdqu8 %ymm14, %ymm10 {%k4}
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm10, %zmm15, %zmm14
+; AVX512BW-NEXT:    vpblendmw %ymm0, %ymm1, %ymm10 {%k2}
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm15 = ymm10[2,3,0,1]
+; AVX512BW-NEXT:    movl $277086208, %eax # imm = 0x10840000
+; AVX512BW-NEXT:    kmovd %eax, %k5
+; AVX512BW-NEXT:    vmovdqu8 %ymm15, %ymm10 {%k5}
+; AVX512BW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm15 {%k1}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm15[3,8,13],zero,zero,zero,xmm15[1,6,11],zero,zero,zero,zero,xmm15[u,u,u]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm15, %xmm15
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm15[2,7,12],zero,zero,zero,xmm15[0,5,10,15,u,u,u]
+; AVX512BW-NEXT:    vporq %xmm16, %xmm15, %xmm15
+; AVX512BW-NEXT:    movl $33546240, %eax # imm = 0x1FFE000
+; AVX512BW-NEXT:    kmovd %eax, %k5
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm15 {%k5} = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,4,9,14,19,24,29,18,23,28,17,22,27,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpblendmw %ymm4, %ymm5, %ymm10 {%k2}
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm16 = ymm10[2,3,0,1]
+; AVX512BW-NEXT:    movl $33825, %eax # imm = 0x8421
+; AVX512BW-NEXT:    kmovd %eax, %k5
+; AVX512BW-NEXT:    vmovdqu8 %ymm16, %ymm10 {%k5}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,1,6,11,0,5,10,15,4,9,14,19,24,29,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm16 = zero,zero,zero,xmm6[2,7,12,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm17 = xmm7[3,8,13],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vporq %xmm16, %xmm17, %xmm11
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0,1,2],xmm10[3,4,5,6,7]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm12[u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,xmm12[4,9,14]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm13[u,u,u,u,u,u,u,u,u,0,5,10,15],zero,zero,zero
+; AVX512BW-NEXT:    vpor %xmm11, %xmm12, %xmm11
+; AVX512BW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
+; AVX512BW-NEXT:    movl $33554431, %eax # imm = 0x1FFFFFF
+; AVX512BW-NEXT:    kmovq %rax, %k5
+; AVX512BW-NEXT:    vmovdqu8 %zmm15, %zmm10 {%k5}
+; AVX512BW-NEXT:    vextracti64x4 $1, %zmm10, %ymm11
+; AVX512BW-NEXT:    vpblendmw %ymm9, %ymm8, %ymm12 {%k3}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm12, %xmm13
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,u],zero,zero,zero,xmm13[1,6,11],zero,zero,zero,zero,xmm13[4,9,14]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,2,7,12],zero,zero,zero,xmm12[0,5,10,15],zero,zero,zero
+; AVX512BW-NEXT:    vpor %xmm13, %xmm12, %xmm12
+; AVX512BW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512BW-NEXT:    vmovdqu8 %ymm12, %ymm11 {%k4}
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm10
+; AVX512BW-NEXT:    vmovdqu16 %ymm3, %ymm2 {%k2}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[3,8,13],zero,zero,zero,xmm3[1,6,11,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,9,14],zero,zero,zero,xmm2[2,7,12],zero,zero,zero,xmm2[u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k1}
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
+; AVX512BW-NEXT:    movl $554172416, %eax # imm = 0x21080000
+; AVX512BW-NEXT:    kmovd %eax, %k2
+; AVX512BW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k2}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,0,5,10,15,20,25,30,19,24,29,18,23,28,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3,4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu16 %ymm5, %ymm4 {%k3}
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm1 = ymm4[2,3,0,1]
+; AVX512BW-NEXT:    movl $2114, %eax # imm = 0x842
+; AVX512BW-NEXT:    kmovd %eax, %k2
+; AVX512BW-NEXT:    vmovdqu8 %ymm1, %ymm4 {%k2}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,2,7,12,1,6,11,0,5,10,15,20,25,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm6[3,8,13,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm7[4,9,14],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[3,4,5,6,7]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm2
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,1,6,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,21,26,31,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [21474836480,21474836480,21474836480,21474836480]
+; AVX512BW-NEXT:    vpermd %ymm2, %ymm3, %ymm2
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k5}
+; AVX512BW-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
+; AVX512BW-NEXT:    vmovdqu16 %ymm9, %ymm8 {%k1}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm8[u,u,u,3,8,13],zero,zero,zero,xmm8[1,6,11],zero,zero,zero,zero
+; AVX512BW-NEXT:    vextracti128 $1, %ymm8, %xmm3
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,zero,xmm3[2,7,12],zero,zero,zero,xmm3[0,5,10,15]
+; AVX512BW-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX512BW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-NEXT:    vmovdqu8 %ymm2, %ymm0 {%k4}
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm18, (%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm19, (%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm14, (%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%r9)
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
   %wide.vec = load <320 x i8>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <320 x i8> %wide.vec, <320 x i8> poison, <64 x i32> <i32 0, i32 5, i32 10, i32 15, i32 20, i32 25, i32 30, i32 35, i32 40, i32 45, i32 50, i32 55, i32 60, i32 65, i32 70, i32 75, i32 80, i32 85, i32 90, i32 95, i32 100, i32 105, i32 110, i32 115, i32 120, i32 125, i32 130, i32 135, i32 140, i32 145, i32 150, i32 155, i32 160, i32 165, i32 170, i32 175, i32 180, i32 185, i32 190, i32 195, i32 200, i32 205, i32 210, i32 215, i32 220, i32 225, i32 230, i32 235, i32 240, i32 245, i32 250, i32 255, i32 260, i32 265, i32 270, i32 275, i32 280, i32 285, i32 290, i32 295, i32 300, i32 305, i32 310, i32 315>
   %strided.vec1 = shufflevector <320 x i8> %wide.vec, <320 x i8> poison, <64 x i32> <i32 1, i32 6, i32 11, i32 16, i32 21, i32 26, i32 31, i32 36, i32 41, i32 46, i32 51, i32 56, i32 61, i32 66, i32 71, i32 76, i32 81, i32 86, i32 91, i32 96, i32 101, i32 106, i32 111, i32 116, i32 121, i32 126, i32 131, i32 136, i32 141, i32 146, i32 151, i32 156, i32 161, i32 166, i32 171, i32 176, i32 181, i32 186, i32 191, i32 196, i32 201, i32 206, i32 211, i32 216, i32 221, i32 226, i32 231, i32 236, i32 241, i32 246, i32 251, i32 256, i32 261, i32 266, i32 271, i32 276, i32 281, i32 286, i32 291, i32 296, i32 301, i32 306, i32 311, i32 316>
@@ -4741,14 +4212,18 @@ define void @load_i8_stride5_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-FAST-PERLANE: {{.*}}
 ; AVX2-SLOW: {{.*}}
 ; AVX512: {{.*}}
+; AVX512BW-FAST: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
+; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
+; AVX512F-FAST: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
+; AVX512F-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}
 ; FALLBACK10: {{.*}}

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
index b4d416e774adf..95800b30987ae 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
@@ -2154,218 +2154,113 @@ define void @load_i8_stride6_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
-; AVX512BW-SLOW-LABEL: load_i8_stride6_vf32:
-; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %ymm4
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm0
-; AVX512BW-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm3
-; AVX512BW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm2
-; AVX512BW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm3, %ymm8
-; AVX512BW-SLOW-NEXT:    movw $-28124, %r10w # imm = 0x9224
-; AVX512BW-SLOW-NEXT:    kmovd %r10d, %k2
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm1, %ymm8, %ymm6 {%k2}
-; AVX512BW-SLOW-NEXT:    movw $18724, %r10w # imm = 0x4924
-; AVX512BW-SLOW-NEXT:    kmovd %r10d, %k1
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm0, %ymm4, %ymm7 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm9
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm9[2,8,14],zero,zero,xmm9[0,6,12,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm3, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    movl $4192256, %r10d # imm = 0x3FF800
-; AVX512BW-SLOW-NEXT:    kmovd %r10d, %k3
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 {%k3} = ymm6[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm3
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm10 {%k1}
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm11
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[0,6,12],zero,zero,zero,xmm11[4,10]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm10[u,u,u,u,u,u,4,10],zero,zero,zero,xmm10[2,8,14],zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %xmm12, %xmm13, %xmm12
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm5[0,1,2],ymm12[3,4,5,6,7],ymm5[8,9,10],ymm12[11,12,13,14,15]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm12[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[1,7,13],zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[3,9,15],zero,zero,xmm9[1,7,13,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm7, %xmm9, %xmm7
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 {%k3} = ymm6[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[1,7,13],zero,zero,zero,xmm11[5,11]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm10[u,u,u,u,u,u,5,11],zero,zero,zero,xmm10[3,9,15],zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %xmm6, %xmm9, %xmm6
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm8, %ymm1, %ymm9 {%k2}
-; AVX512BW-SLOW-NEXT:    movw $9362, %di # imm = 0x2492
-; AVX512BW-SLOW-NEXT:    kmovd %edi, %k3
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm4, %ymm0, %ymm10 {%k3}
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm11
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm10[2,8,14],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm7, %xmm12, %xmm7
-; AVX512BW-SLOW-NEXT:    movl $2095104, %edi # imm = 0x1FF800
-; AVX512BW-SLOW-NEXT:    kmovd %edi, %k4
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm12 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm12[u,u,u,u,u,0,6,12],zero,zero,zero,xmm12[4,10],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm14
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[2,8,14],zero,zero,xmm14[0,6,12]
-; AVX512BW-SLOW-NEXT:    vpor %xmm13, %xmm15, %xmm13
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
-; AVX512BW-SLOW-NEXT:    movl $-2097152, %edi # imm = 0xFFE00000
-; AVX512BW-SLOW-NEXT:    kmovd %edi, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm13, %ymm7 {%k2}
-; AVX512BW-SLOW-NEXT:    movw $9289, %di # imm = 0x2449
-; AVX512BW-SLOW-NEXT:    kmovd %edi, %k5
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm8, %ymm1 {%k5}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[3,9,15],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm8, %xmm10, %xmm8
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm12[u,u,u,u,u,1,7,13],zero,zero,zero,xmm12[5,11],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[3,9,15],zero,zero,xmm14[1,7,13]
-; AVX512BW-SLOW-NEXT:    vpor %xmm9, %xmm10, %xmm9
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm9, %ymm8 {%k2}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm4, %ymm0 {%k1}
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[4,10,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm10, %xmm11, %xmm10
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm9[5,6,7]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm2, %ymm3 {%k3}
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm2
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[2,8,14]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[u,u,u,u,u,2,8,14],zero,zero,xmm3[0,6,12],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %xmm10, %xmm11, %xmm10
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm10, %ymm9 {%k2}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[5,11,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm4, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[5,11],zero,zero,zero,xmm2[3,9,15]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,3,9,15],zero,zero,xmm3[1,7,13],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %xmm1, %xmm2, %xmm1
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k2}
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm5, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm6, (%rdx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm7, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm8, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm9, (%r9)
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm0, (%rax)
-; AVX512BW-SLOW-NEXT:    vzeroupper
-; AVX512BW-SLOW-NEXT:    retq
-;
-; AVX512BW-FAST-LABEL: load_i8_stride6_vf32:
-; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %ymm5
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %ymm0
-; AVX512BW-FAST-NEXT:    vmovdqa 64(%rdi), %ymm1
-; AVX512BW-FAST-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512BW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],mem[2,3]
-; AVX512BW-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm1, %ymm7
-; AVX512BW-FAST-NEXT:    movw $-28124, %r10w # imm = 0x9224
-; AVX512BW-FAST-NEXT:    kmovd %r10d, %k2
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm2, %ymm7, %ymm8 {%k2}
-; AVX512BW-FAST-NEXT:    movw $18724, %r10w # imm = 0x4924
-; AVX512BW-FAST-NEXT:    kmovd %r10d, %k1
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm0, %ymm5, %ymm6 {%k1}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm6[0,6,12],zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm9
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm9[2,8,14],zero,zero,xmm9[0,6,12,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm1, %xmm4, %xmm1
-; AVX512BW-FAST-NEXT:    movl $4192256, %r10d # imm = 0x3FF800
-; AVX512BW-FAST-NEXT:    kmovd %r10d, %k3
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm1 {%k3} = ymm8[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vmovdqa 160(%rdi), %ymm4
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm3, %ymm4, %ymm10 {%k1}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm11
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[0,6,12],zero,zero,zero,xmm11[4,10]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm10[u,u,u,u,u,u,4,10],zero,zero,zero,xmm10[2,8,14],zero,zero
-; AVX512BW-FAST-NEXT:    vpor %xmm12, %xmm13, %xmm12
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [0,1,2,3,4,5,6,7,8,9,10,19,20,21,22,23]
-; AVX512BW-FAST-NEXT:    vpermt2w %ymm12, %ymm13, %ymm1
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[1,7,13],zero,zero,zero,xmm6[5,11],zero,zero,zero,xmm6[u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[3,9,15],zero,zero,xmm9[1,7,13,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm6, %xmm9, %xmm6
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm6 {%k3} = ymm8[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[1,7,13],zero,zero,zero,xmm11[5,11]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm10[u,u,u,u,u,u,5,11],zero,zero,zero,xmm10[3,9,15],zero,zero
-; AVX512BW-FAST-NEXT:    vpor %xmm8, %xmm9, %xmm8
-; AVX512BW-FAST-NEXT:    vpermt2w %ymm8, %ymm13, %ymm6
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm7, %ymm2, %ymm9 {%k2}
-; AVX512BW-FAST-NEXT:    movw $9362, %di # imm = 0x2492
-; AVX512BW-FAST-NEXT:    kmovd %edi, %k3
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm5, %ymm0, %ymm10 {%k3}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm11
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm10[2,8,14],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm8, %xmm12, %xmm8
-; AVX512BW-FAST-NEXT:    movl $2095104, %edi # imm = 0x1FF800
-; AVX512BW-FAST-NEXT:    kmovd %edi, %k4
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm8 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm4, %ymm3, %ymm12 {%k1}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm12[u,u,u,u,u,0,6,12],zero,zero,zero,xmm12[4,10],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm14
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[2,8,14],zero,zero,xmm14[0,6,12]
-; AVX512BW-FAST-NEXT:    vpor %xmm13, %xmm15, %xmm13
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
-; AVX512BW-FAST-NEXT:    movl $-2097152, %edi # imm = 0xFFE00000
-; AVX512BW-FAST-NEXT:    kmovd %edi, %k2
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm13, %ymm8 {%k2}
-; AVX512BW-FAST-NEXT:    movw $9289, %di # imm = 0x2449
-; AVX512BW-FAST-NEXT:    kmovd %edi, %k5
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm7, %ymm2 {%k5}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[3,9,15],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm7, %xmm10, %xmm7
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm7 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm12[u,u,u,u,u,1,7,13],zero,zero,zero,xmm12[5,11],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[3,9,15],zero,zero,xmm14[1,7,13]
-; AVX512BW-FAST-NEXT:    vpor %xmm9, %xmm10, %xmm9
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm9, %ymm7 {%k2}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm5, %ymm0 {%k1}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,xmm5[0,6,12],zero,zero,zero,xmm5[4,10,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm10, %xmm11, %xmm10
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm9[5,6,7]
-; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm3, %ymm4 {%k3}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm3
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm3[u,u,u,u,u],zero,zero,zero,xmm3[4,10],zero,zero,zero,xmm3[2,8,14]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm4[u,u,u,u,u,2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpor %xmm10, %xmm11, %xmm10
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm10, %ymm9 {%k2}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,xmm5[1,7,13],zero,zero,zero,xmm5[5,11,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm5, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm2[5,6,7]
-; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u],zero,zero,zero,xmm3[5,11],zero,zero,zero,xmm3[3,9,15]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm4[u,u,u,u,u,3,9,15],zero,zero,xmm4[1,7,13],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm2, %ymm0 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm1, (%rsi)
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm6, (%rdx)
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm8, (%rcx)
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm7, (%r8)
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm9, (%r9)
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm0, (%rax)
-; AVX512BW-FAST-NEXT:    vzeroupper
-; AVX512BW-FAST-NEXT:    retq
+; AVX512BW-LABEL: load_i8_stride6_vf32:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX512BW-NEXT:    vmovdqa 32(%rdi), %ymm0
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm3
+; AVX512BW-NEXT:    vmovdqa 128(%rdi), %ymm2
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm3[2,3],mem[2,3]
+; AVX512BW-NEXT:    vinserti128 $1, 96(%rdi), %ymm3, %ymm8
+; AVX512BW-NEXT:    movw $-28124, %r10w # imm = 0x9224
+; AVX512BW-NEXT:    kmovd %r10d, %k2
+; AVX512BW-NEXT:    vpblendmw %ymm1, %ymm8, %ymm6 {%k2}
+; AVX512BW-NEXT:    movw $18724, %r10w # imm = 0x4924
+; AVX512BW-NEXT:    kmovd %r10d, %k1
+; AVX512BW-NEXT:    vpblendmw %ymm0, %ymm4, %ymm7 {%k1}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm7[0,6,12],zero,zero,zero,xmm7[4,10],zero,zero,zero,xmm7[u,u,u,u,u]
+; AVX512BW-NEXT:    vextracti128 $1, %ymm7, %xmm9
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm9[2,8,14],zero,zero,xmm9[0,6,12,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm3, %xmm5, %xmm5
+; AVX512BW-NEXT:    movl $4192256, %r10d # imm = 0x3FF800
+; AVX512BW-NEXT:    kmovd %r10d, %k3
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm5 {%k3} = ymm6[u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vmovdqa 160(%rdi), %ymm3
+; AVX512BW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm10 {%k1}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm10, %xmm11
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[0,6,12],zero,zero,zero,xmm11[4,10]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm10[u,u,u,u,u,u,4,10],zero,zero,zero,xmm10[2,8,14],zero,zero
+; AVX512BW-NEXT:    vpor %xmm12, %xmm13, %xmm12
+; AVX512BW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm5[0,1,2],ymm12[3,4,5,6,7],ymm5[8,9,10],ymm12[11,12,13,14,15]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm12[4,5,6,7]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[1,7,13],zero,zero,zero,xmm7[5,11],zero,zero,zero,xmm7[u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[3,9,15],zero,zero,xmm9[1,7,13,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm7, %xmm9, %xmm7
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm7 {%k3} = ymm6[u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm11[u,u,u,u,u,u],zero,zero,xmm11[1,7,13],zero,zero,zero,xmm11[5,11]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm10[u,u,u,u,u,u,5,11],zero,zero,zero,xmm10[3,9,15],zero,zero
+; AVX512BW-NEXT:    vpor %xmm6, %xmm9, %xmm6
+; AVX512BW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0,1,2],ymm6[3,4,5,6,7],ymm7[8,9,10],ymm6[11,12,13,14,15]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX512BW-NEXT:    vpblendmw %ymm8, %ymm1, %ymm9 {%k2}
+; AVX512BW-NEXT:    movw $9362, %di # imm = 0x2492
+; AVX512BW-NEXT:    kmovd %edi, %k3
+; AVX512BW-NEXT:    vpblendmw %ymm4, %ymm0, %ymm10 {%k3}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm10, %xmm11
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm11[4,10],zero,zero,zero,xmm11[2,8,14,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm10[2,8,14],zero,zero,xmm10[0,6,12],zero,zero,zero,xmm10[u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm7, %xmm12, %xmm7
+; AVX512BW-NEXT:    movl $2095104, %edi # imm = 0x1FF800
+; AVX512BW-NEXT:    kmovd %edi, %k4
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm7 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,18,24,30,20,26,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm12 {%k1}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm12[u,u,u,u,u,0,6,12],zero,zero,zero,xmm12[4,10],zero,zero,zero
+; AVX512BW-NEXT:    vextracti128 $1, %ymm12, %xmm14
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[2,8,14],zero,zero,xmm14[0,6,12]
+; AVX512BW-NEXT:    vpor %xmm13, %xmm15, %xmm13
+; AVX512BW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512BW-NEXT:    movl $-2097152, %edi # imm = 0xFFE00000
+; AVX512BW-NEXT:    kmovd %edi, %k2
+; AVX512BW-NEXT:    vmovdqu8 %ymm13, %ymm7 {%k2}
+; AVX512BW-NEXT:    movw $9289, %di # imm = 0x2449
+; AVX512BW-NEXT:    kmovd %edi, %k5
+; AVX512BW-NEXT:    vmovdqu16 %ymm8, %ymm1 {%k5}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm11[5,11],zero,zero,zero,xmm11[3,9,15,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[3,9,15],zero,zero,xmm10[1,7,13],zero,zero,zero,xmm10[u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm8, %xmm10, %xmm8
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm8 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,19,25,31,21,27,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm12[u,u,u,u,u,1,7,13],zero,zero,zero,xmm12[5,11],zero,zero,zero
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u],zero,zero,zero,xmm14[3,9,15],zero,zero,xmm14[1,7,13]
+; AVX512BW-NEXT:    vpor %xmm9, %xmm10, %xmm9
+; AVX512BW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512BW-NEXT:    vmovdqu8 %ymm9, %ymm8 {%k2}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,20,26,16,22,28,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vmovdqu16 %ymm4, %ymm0 {%k1}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[4,10,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm0[4,10],zero,zero,zero,xmm0[2,8,14],zero,zero,xmm0[u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm10, %xmm11, %xmm10
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm9[5,6,7]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu16 %ymm2, %ymm3 {%k3}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm3, %xmm2
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[2,8,14]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[u,u,u,u,u,2,8,14],zero,zero,xmm3[0,6,12],zero,zero,zero
+; AVX512BW-NEXT:    vpor %xmm10, %xmm11, %xmm10
+; AVX512BW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512BW-NEXT:    vmovdqu8 %ymm10, %ymm9 {%k2}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,21,27,17,23,29,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[5,11,u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,11],zero,zero,zero,xmm0[3,9,15],zero,zero,xmm0[u,u,u,u,u,u]
+; AVX512BW-NEXT:    vpor %xmm4, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5,6,7]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm2[u,u,u,u,u],zero,zero,zero,xmm2[5,11],zero,zero,zero,xmm2[3,9,15]
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,3,9,15],zero,zero,xmm3[1,7,13],zero,zero,zero
+; AVX512BW-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k2}
+; AVX512BW-NEXT:    vmovdqa %ymm5, (%rsi)
+; AVX512BW-NEXT:    vmovdqa %ymm6, (%rdx)
+; AVX512BW-NEXT:    vmovdqa %ymm7, (%rcx)
+; AVX512BW-NEXT:    vmovdqa %ymm8, (%r8)
+; AVX512BW-NEXT:    vmovdqa %ymm9, (%r9)
+; AVX512BW-NEXT:    vmovdqa %ymm0, (%rax)
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
   %wide.vec = load <192 x i8>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <32 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90, i32 96, i32 102, i32 108, i32 114, i32 120, i32 126, i32 132, i32 138, i32 144, i32 150, i32 156, i32 162, i32 168, i32 174, i32 180, i32 186>
   %strided.vec1 = shufflevector <192 x i8> %wide.vec, <192 x i8> poison, <32 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91, i32 97, i32 103, i32 109, i32 115, i32 121, i32 127, i32 133, i32 139, i32 145, i32 151, i32 157, i32 163, i32 169, i32 175, i32 181, i32 187>
@@ -4682,7 +4577,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vmovdqa %ymm1, %ymm4
 ; AVX512F-NEXT:    vpternlogq $202, %ymm10, %ymm25, %ymm4
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[2,8,14,4,10,16,22,28,18,24,30,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[2,8,14,4,10,16,22,28,18,24,30],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-NEXT:    vpternlogq $248, %ymm2, %ymm5, %ymm0
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512F-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm0 # 64-byte Folded Reload
@@ -4692,7 +4587,7 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vinserti32x4 $1, %xmm26, %ymm0, %ymm15
 ; AVX512F-NEXT:    vpblendw {{.*#+}} ymm15 = ymm0[0,1,2],ymm15[3,4,5,6,7],ymm0[8,9,10],ymm15[11,12,13,14,15]
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm15[4,5,6,7]
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[3,9,15,5,11,17,23,29,19,25,31,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[3,9,15,5,11,17,23,29,19,25,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2, %ymm15 # 32-byte Folded Reload
 ; AVX512F-NEXT:    vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 16-byte Folded Reload
 ; AVX512F-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 16-byte Folded Reload
@@ -4819,474 +4714,241 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
-; AVX512BW-SLOW-LABEL: load_i8_stride6_vf64:
-; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vmovdqa 224(%rdi), %ymm0
-; AVX512BW-SLOW-NEXT:    vmovdqa64 192(%rdi), %ymm23
-; AVX512BW-SLOW-NEXT:    movw $18724, %r10w # imm = 0x4924
-; AVX512BW-SLOW-NEXT:    kmovd %r10d, %k1
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm0, %ymm23, %ymm9 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm12
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm12, %xmm3
-; AVX512BW-SLOW-NEXT:    vpor %xmm1, %xmm3, %xmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %ymm10
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512BW-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm6
-; AVX512BW-SLOW-NEXT:    vmovdqa64 128(%rdi), %ymm26
-; AVX512BW-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm1
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm26, %ymm1, %ymm15 {%k1}
-; AVX512BW-SLOW-NEXT:    vextracti32x4 $1, %ymm15, %xmm16
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm16, %xmm11
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm18, %xmm15, %xmm13
-; AVX512BW-SLOW-NEXT:    vpor %xmm11, %xmm13, %xmm11
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm5, %zmm11, %zmm11
-; AVX512BW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm6[2,3],mem[2,3]
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, 96(%rdi), %ymm6, %ymm13
-; AVX512BW-SLOW-NEXT:    movw $-28124, %r10w # imm = 0x9224
-; AVX512BW-SLOW-NEXT:    kmovd %r10d, %k4
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm5, %ymm13, %ymm19 {%k4}
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm3, %ymm10, %ymm20 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm20, %xmm2
-; AVX512BW-SLOW-NEXT:    vextracti32x4 $1, %ymm20, %xmm21
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm21, %xmm4
-; AVX512BW-SLOW-NEXT:    vpor %xmm2, %xmm4, %xmm2
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,0,6,12,2,8,14,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    movl $4192256, %r10d # imm = 0x3FF800
-; AVX512BW-SLOW-NEXT:    kmovd %r10d, %k2
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm6, %ymm19, %ymm2 {%k2}
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm11, %zmm2 {%k2}
-; AVX512BW-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm11
-; AVX512BW-SLOW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm11[2,3],mem[2,3]
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, 288(%rdi), %ymm11, %ymm14
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm4, %ymm14, %ymm22 {%k4}
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm6, %ymm22, %ymm7
-; AVX512BW-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm11
-; AVX512BW-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm6
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm11, %ymm6, %ymm24 {%k1}
-; AVX512BW-SLOW-NEXT:    vextracti32x4 $1, %ymm24, %xmm25
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm25, %xmm17
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm18, %xmm24, %xmm18
-; AVX512BW-SLOW-NEXT:    vporq %xmm17, %xmm18, %xmm17
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm8
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
-; AVX512BW-SLOW-NEXT:    movabsq $-8796093022208, %rdi # imm = 0xFFFFF80000000000
-; AVX512BW-SLOW-NEXT:    kmovq %rdi, %k3
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm7, %zmm2 {%k3}
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm9, %xmm8
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpor %xmm8, %xmm12, %xmm8
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm16, %xmm16
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm15, %xmm15
-; AVX512BW-SLOW-NEXT:    vporq %xmm16, %xmm15, %xmm15
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm8, %zmm15, %zmm8
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm20, %xmm7
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm21, %xmm9
-; AVX512BW-SLOW-NEXT:    vpor %xmm7, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,1,7,13,3,9,15,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm7, %ymm19, %ymm9 {%k2}
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm8, %zmm9 {%k2}
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm7, %ymm22, %ymm7
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm25, %xmm8
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm24, %xmm12
-; AVX512BW-SLOW-NEXT:    vpor %xmm8, %xmm12, %xmm8
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm7, %zmm9 {%k3}
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm13, %ymm5, %ymm15 {%k4}
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    movw $9362, %di # imm = 0x2492
-; AVX512BW-SLOW-NEXT:    kmovd %edi, %k2
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm10, %ymm3, %ymm8 {%k2}
-; AVX512BW-SLOW-NEXT:    vextracti32x4 $1, %ymm8, %xmm16
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm16, %xmm12
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm8, %xmm18
-; AVX512BW-SLOW-NEXT:    vporq %xmm12, %xmm18, %xmm18
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm19 = <u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,2,8,14,4,10,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    movl $2095104, %edi # imm = 0x1FF800
-; AVX512BW-SLOW-NEXT:    kmovd %edi, %k5
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm19, %ymm15, %ymm18 {%k5}
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm23, %ymm0, %ymm20 {%k2}
-; AVX512BW-SLOW-NEXT:    vextracti32x4 $1, %ymm20, %xmm21
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm21, %xmm7
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm20, %xmm12
-; AVX512BW-SLOW-NEXT:    vpor %xmm7, %xmm12, %xmm7
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm1, %ymm26, %ymm17 {%k1}
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm22 = <u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm17, %xmm12
-; AVX512BW-SLOW-NEXT:    vextracti32x4 $1, %ymm17, %xmm24
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm25 = <u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm25, %xmm24, %xmm27
-; AVX512BW-SLOW-NEXT:    vporq %xmm12, %xmm27, %xmm12
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm7, %zmm12, %zmm12
-; AVX512BW-SLOW-NEXT:    movl $2097151, %edi # imm = 0x1FFFFF
-; AVX512BW-SLOW-NEXT:    kmovq %rdi, %k6
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm18, %zmm12 {%k6}
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm14, %ymm4, %ymm7 {%k4}
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm6, %ymm11, %ymm18 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm18, %xmm22
-; AVX512BW-SLOW-NEXT:    vextracti32x4 $1, %ymm18, %xmm27
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm25, %xmm27, %xmm25
-; AVX512BW-SLOW-NEXT:    vporq %xmm22, %xmm25, %xmm22
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm22
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm19, %ymm7, %ymm22 {%k5}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm0, %zmm19
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm19, %zmm12 {%k3}
-; AVX512BW-SLOW-NEXT:    movw $9289, %di # imm = 0x2449
-; AVX512BW-SLOW-NEXT:    kmovd %edi, %k4
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm14, %ymm4 {%k4}
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm13, %ymm5 {%k4}
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm13, %xmm16, %xmm14
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm16 = <3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm16, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vpor %xmm14, %xmm8, %xmm8
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,3,9,15,5,11,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm14, %ymm15, %ymm8 {%k5}
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm13, %xmm21, %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm16, %xmm20, %xmm15
-; AVX512BW-SLOW-NEXT:    vpor %xmm13, %xmm15, %xmm13
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm17, %xmm16
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm24, %xmm19
-; AVX512BW-SLOW-NEXT:    vporq %xmm16, %xmm19, %xmm16
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $1, %xmm16, %ymm0, %ymm16
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm13, %zmm16, %zmm13
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm8, %zmm13 {%k6}
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm18, %xmm8
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm27, %xmm15
-; AVX512BW-SLOW-NEXT:    vpor %xmm8, %xmm15, %xmm8
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm14, %ymm7, %ymm8 {%k5}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm7
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm7, %zmm13 {%k3}
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,4,10,0,6,12,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm7, %ymm5, %ymm8
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm10, %ymm3 {%k1}
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm15
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm15, %xmm10
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm16 = <4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm16, %xmm3, %xmm17
-; AVX512BW-SLOW-NEXT:    vporq %xmm10, %xmm17, %xmm10
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm8[5,6,7]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm23, %ymm0 {%k1}
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm8
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm8, %xmm14
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm16, %xmm0, %xmm16
-; AVX512BW-SLOW-NEXT:    vporq %xmm14, %xmm16, %xmm14
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm26, %ymm1 {%k2}
-; AVX512BW-SLOW-NEXT:    vextracti32x4 $1, %ymm1, %xmm16
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm16, %xmm18
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm19, %xmm1, %xmm20
-; AVX512BW-SLOW-NEXT:    vporq %xmm18, %xmm20, %xmm18
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $1, %xmm18, %ymm0, %ymm18
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm14, %zmm18, %zmm14
-; AVX512BW-SLOW-NEXT:    movabsq $4398044413952, %rdi # imm = 0x3FFFFE00000
-; AVX512BW-SLOW-NEXT:    kmovq %rdi, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm14, %zmm10 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm7, %ymm4, %ymm7
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm11, %ymm6 {%k2}
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm11
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm11, %xmm14
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm19, %xmm6, %xmm17
-; AVX512BW-SLOW-NEXT:    vporq %xmm14, %xmm17, %xmm14
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512BW-SLOW-NEXT:    movl $-2097152, %edi # imm = 0xFFE00000
-; AVX512BW-SLOW-NEXT:    kmovd %edi, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm14, %ymm7 {%k2}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm7, %zmm10 {%k2}
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,5,11,1,7,13,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm7, %ymm5, %ymm5
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm15, %xmm15
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpor %xmm3, %xmm15, %xmm3
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm5[5,6,7]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm8, %xmm5
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpor %xmm5, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm16, %xmm8
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
-; AVX512BW-SLOW-NEXT:    vpor %xmm1, %xmm8, %xmm1
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm3 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm7, %ymm4, %ymm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm11, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm6, %xmm4
-; AVX512BW-SLOW-NEXT:    vpor %xmm1, %xmm4, %xmm1
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k2}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm0, %zmm3 {%k2}
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm2, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm9, (%rdx)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm12, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm13, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm10, (%r9)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512BW-SLOW-NEXT:    vzeroupper
-; AVX512BW-SLOW-NEXT:    retq
-;
-; AVX512BW-FAST-LABEL: load_i8_stride6_vf64:
-; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vmovdqa 224(%rdi), %ymm0
-; AVX512BW-FAST-NEXT:    vmovdqa 192(%rdi), %ymm7
-; AVX512BW-FAST-NEXT:    movw $18724, %r10w # imm = 0x4924
-; AVX512BW-FAST-NEXT:    kmovd %r10d, %k1
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm0, %ymm7, %ymm9 {%k1}
-; AVX512BW-FAST-NEXT:    vpshufb %xmm2, %xmm9, %xmm1
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm9, %xmm12
-; AVX512BW-FAST-NEXT:    vpshufb %xmm4, %xmm12, %xmm3
-; AVX512BW-FAST-NEXT:    vpor %xmm1, %xmm3, %xmm5
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %ymm10
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512BW-FAST-NEXT:    vmovdqa 64(%rdi), %ymm6
-; AVX512BW-FAST-NEXT:    vmovdqa 128(%rdi), %ymm8
-; AVX512BW-FAST-NEXT:    vmovdqa 160(%rdi), %ymm1
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm8, %ymm1, %ymm15 {%k1}
-; AVX512BW-FAST-NEXT:    vextracti32x4 $1, %ymm15, %xmm16
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm16, %xmm11
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm18, %xmm15, %xmm13
-; AVX512BW-FAST-NEXT:    vpor %xmm11, %xmm13, %xmm11
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm11, %zmm11
-; AVX512BW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm6[2,3],mem[2,3]
-; AVX512BW-FAST-NEXT:    vinserti128 $1, 96(%rdi), %ymm6, %ymm13
-; AVX512BW-FAST-NEXT:    movw $-28124, %r10w # imm = 0x9224
-; AVX512BW-FAST-NEXT:    kmovd %r10d, %k4
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm5, %ymm13, %ymm19 {%k4}
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm3, %ymm10, %ymm20 {%k1}
-; AVX512BW-FAST-NEXT:    vpshufb %xmm2, %xmm20, %xmm2
-; AVX512BW-FAST-NEXT:    vextracti32x4 $1, %ymm20, %xmm21
-; AVX512BW-FAST-NEXT:    vpshufb %xmm4, %xmm21, %xmm4
-; AVX512BW-FAST-NEXT:    vpor %xmm2, %xmm4, %xmm2
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,0,6,12,2,8,14,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    movl $4192256, %r10d # imm = 0x3FF800
-; AVX512BW-FAST-NEXT:    kmovd %r10d, %k2
-; AVX512BW-FAST-NEXT:    vpshufb %ymm6, %ymm19, %ymm2 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm11, %zmm2 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqa 256(%rdi), %ymm11
-; AVX512BW-FAST-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm11[2,3],mem[2,3]
-; AVX512BW-FAST-NEXT:    vinserti128 $1, 288(%rdi), %ymm11, %ymm14
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm4, %ymm14, %ymm22 {%k4}
-; AVX512BW-FAST-NEXT:    vpshufb %ymm6, %ymm22, %ymm23
-; AVX512BW-FAST-NEXT:    vmovdqa 320(%rdi), %ymm11
-; AVX512BW-FAST-NEXT:    vmovdqa 352(%rdi), %ymm6
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm11, %ymm6, %ymm24 {%k1}
-; AVX512BW-FAST-NEXT:    vextracti32x4 $1, %ymm24, %xmm25
-; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm25, %xmm17
-; AVX512BW-FAST-NEXT:    vpshufb %xmm18, %xmm24, %xmm18
-; AVX512BW-FAST-NEXT:    vporq %xmm17, %xmm18, %xmm17
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [0,1,2,3,4,5,6,7,8,9,10,19,20,21,22,23]
-; AVX512BW-FAST-NEXT:    vpermt2w %ymm17, %ymm18, %ymm23
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm23, %zmm0, %zmm17
-; AVX512BW-FAST-NEXT:    movabsq $-8796093022208, %rdi # imm = 0xFFFFF80000000000
-; AVX512BW-FAST-NEXT:    kmovq %rdi, %k3
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm17, %zmm2 {%k3}
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm9, %xmm9
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm23 = <128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm23, %xmm12, %xmm12
-; AVX512BW-FAST-NEXT:    vpor %xmm9, %xmm12, %xmm9
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm16, %xmm16
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm26 = <u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm26, %xmm15, %xmm15
-; AVX512BW-FAST-NEXT:    vporq %xmm16, %xmm15, %xmm15
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
-; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm9, %zmm15, %zmm15
-; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm20, %xmm9
-; AVX512BW-FAST-NEXT:    vpshufb %xmm23, %xmm21, %xmm16
-; AVX512BW-FAST-NEXT:    vporq %xmm9, %xmm16, %xmm9
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,1,7,13,3,9,15,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %ymm16, %ymm19, %ymm9 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm15, %zmm9 {%k2}
-; AVX512BW-FAST-NEXT:    vpshufb %ymm16, %ymm22, %ymm15
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm25, %xmm12
-; AVX512BW-FAST-NEXT:    vpshufb %xmm26, %xmm24, %xmm16
-; AVX512BW-FAST-NEXT:    vporq %xmm12, %xmm16, %xmm12
-; AVX512BW-FAST-NEXT:    vpermt2w %ymm12, %ymm18, %ymm15
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm0, %zmm12
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm12, %zmm9 {%k3}
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm13, %ymm5, %ymm15 {%k4}
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    movw $9362, %di # imm = 0x2492
-; AVX512BW-FAST-NEXT:    kmovd %edi, %k2
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm10, %ymm3, %ymm16 {%k2}
-; AVX512BW-FAST-NEXT:    vextracti32x4 $1, %ymm16, %xmm17
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm17, %xmm18
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm19, %xmm16, %xmm20
-; AVX512BW-FAST-NEXT:    vporq %xmm18, %xmm20, %xmm18
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = <u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,2,8,14,4,10,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    movl $2095104, %edi # imm = 0x1FF800
-; AVX512BW-FAST-NEXT:    kmovd %edi, %k5
-; AVX512BW-FAST-NEXT:    vpshufb %ymm20, %ymm15, %ymm18 {%k5}
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm7, %ymm0, %ymm21 {%k2}
-; AVX512BW-FAST-NEXT:    vextracti32x4 $1, %ymm21, %xmm22
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm22, %xmm12
-; AVX512BW-FAST-NEXT:    vpshufb %xmm19, %xmm21, %xmm19
-; AVX512BW-FAST-NEXT:    vporq %xmm12, %xmm19, %xmm12
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm1, %ymm8, %ymm19 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm23 = <u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm23, %xmm19, %xmm24
-; AVX512BW-FAST-NEXT:    vextracti32x4 $1, %ymm19, %xmm25
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm26 = <u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm26, %xmm25, %xmm27
-; AVX512BW-FAST-NEXT:    vporq %xmm24, %xmm27, %xmm24
-; AVX512BW-FAST-NEXT:    vinserti32x4 $1, %xmm24, %ymm0, %ymm24
-; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm12, %zmm24, %zmm12
-; AVX512BW-FAST-NEXT:    movl $2097151, %edi # imm = 0x1FFFFF
-; AVX512BW-FAST-NEXT:    kmovq %rdi, %k6
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm18, %zmm12 {%k6}
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm14, %ymm4, %ymm18 {%k4}
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm6, %ymm11, %ymm24 {%k1}
-; AVX512BW-FAST-NEXT:    vpshufb %xmm23, %xmm24, %xmm23
-; AVX512BW-FAST-NEXT:    vextracti32x4 $1, %ymm24, %xmm27
-; AVX512BW-FAST-NEXT:    vpshufb %xmm26, %xmm27, %xmm26
-; AVX512BW-FAST-NEXT:    vporq %xmm23, %xmm26, %xmm23
-; AVX512BW-FAST-NEXT:    vinserti32x4 $1, %xmm23, %ymm0, %ymm23
-; AVX512BW-FAST-NEXT:    vpshufb %ymm20, %ymm18, %ymm23 {%k5}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm23, %zmm0, %zmm20
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm20, %zmm12 {%k3}
-; AVX512BW-FAST-NEXT:    movw $9289, %di # imm = 0x2449
-; AVX512BW-FAST-NEXT:    kmovd %edi, %k4
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm14, %ymm4 {%k4}
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm13, %ymm5 {%k4}
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm13, %xmm17, %xmm14
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm16, %xmm16
-; AVX512BW-FAST-NEXT:    vporq %xmm14, %xmm16, %xmm14
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = <u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,3,9,15,5,11,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %ymm16, %ymm15, %ymm14 {%k5}
-; AVX512BW-FAST-NEXT:    vpshufb %xmm13, %xmm22, %xmm13
-; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm21, %xmm15
-; AVX512BW-FAST-NEXT:    vpor %xmm13, %xmm15, %xmm13
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm15, %xmm19, %xmm17
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm19, %xmm25, %xmm20
-; AVX512BW-FAST-NEXT:    vporq %xmm17, %xmm20, %xmm17
-; AVX512BW-FAST-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm17
-; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm13, %zmm17, %zmm13
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm14, %zmm13 {%k6}
-; AVX512BW-FAST-NEXT:    vpshufb %xmm15, %xmm24, %xmm14
-; AVX512BW-FAST-NEXT:    vpshufb %xmm19, %xmm27, %xmm15
-; AVX512BW-FAST-NEXT:    vpor %xmm14, %xmm15, %xmm14
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512BW-FAST-NEXT:    vpshufb %ymm16, %ymm18, %ymm14 {%k5}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm14
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm14, %zmm13 {%k3}
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,4,10,0,6,12,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %ymm14, %ymm5, %ymm15
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm16 = <128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm10, %ymm3 {%k1}
-; AVX512BW-FAST-NEXT:    vextracti32x4 $1, %ymm3, %xmm17
-; AVX512BW-FAST-NEXT:    vpshufb %xmm16, %xmm17, %xmm10
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm18, %xmm3, %xmm19
-; AVX512BW-FAST-NEXT:    vporq %xmm10, %xmm19, %xmm10
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm15[5,6,7]
-; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm15[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm7, %ymm0 {%k1}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm7
-; AVX512BW-FAST-NEXT:    vpshufb %xmm16, %xmm7, %xmm15
-; AVX512BW-FAST-NEXT:    vpshufb %xmm18, %xmm0, %xmm16
-; AVX512BW-FAST-NEXT:    vporq %xmm15, %xmm16, %xmm15
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm8, %ymm1 {%k2}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm8
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm16 = <u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm16, %xmm8, %xmm18
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm19, %xmm1, %xmm20
-; AVX512BW-FAST-NEXT:    vporq %xmm18, %xmm20, %xmm18
-; AVX512BW-FAST-NEXT:    vinserti32x4 $1, %xmm18, %ymm0, %ymm18
-; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm15, %zmm18, %zmm15
-; AVX512BW-FAST-NEXT:    movabsq $4398044413952, %rdi # imm = 0x3FFFFE00000
-; AVX512BW-FAST-NEXT:    kmovq %rdi, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm15, %zmm10 {%k1}
-; AVX512BW-FAST-NEXT:    vpshufb %ymm14, %ymm4, %ymm14
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm11, %ymm6 {%k2}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm11
-; AVX512BW-FAST-NEXT:    vpshufb %xmm16, %xmm11, %xmm15
-; AVX512BW-FAST-NEXT:    vpshufb %xmm19, %xmm6, %xmm16
-; AVX512BW-FAST-NEXT:    vporq %xmm15, %xmm16, %xmm15
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
-; AVX512BW-FAST-NEXT:    movl $-2097152, %edi # imm = 0xFFE00000
-; AVX512BW-FAST-NEXT:    kmovd %edi, %k2
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm15, %ymm14 {%k2}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm14
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm14, %zmm10 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,5,11,1,7,13,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %ymm14, %ymm5, %ymm5
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm15, %xmm17, %xmm16
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm3, %xmm3
-; AVX512BW-FAST-NEXT:    vporq %xmm16, %xmm3, %xmm3
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm5[5,6,7]
-; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpshufb %xmm15, %xmm7, %xmm5
-; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpor %xmm5, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm5, %xmm8, %xmm7
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vpor %xmm7, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm3 {%k1}
-; AVX512BW-FAST-NEXT:    vpshufb %ymm14, %ymm4, %ymm0
-; AVX512BW-FAST-NEXT:    vpshufb %xmm5, %xmm11, %xmm1
-; AVX512BW-FAST-NEXT:    vpshufb %xmm8, %xmm6, %xmm4
-; AVX512BW-FAST-NEXT:    vpor %xmm1, %xmm4, %xmm1
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k2}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm0, %zmm3 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm2, (%rsi)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm9, (%rdx)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm12, (%rcx)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm13, (%r8)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm10, (%r9)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512BW-FAST-NEXT:    vzeroupper
-; AVX512BW-FAST-NEXT:    retq
+; AVX512BW-LABEL: load_i8_stride6_vf64:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = <0,6,12,128,128,128,4,10,128,128,128,u,u,u,u,u>
+; AVX512BW-NEXT:    vmovdqa 224(%rdi), %ymm0
+; AVX512BW-NEXT:    vmovdqa64 192(%rdi), %ymm23
+; AVX512BW-NEXT:    movw $18724, %r10w # imm = 0x4924
+; AVX512BW-NEXT:    kmovd %r10d, %k1
+; AVX512BW-NEXT:    vpblendmw %ymm0, %ymm23, %ymm9 {%k1}
+; AVX512BW-NEXT:    vpshufb %xmm2, %xmm9, %xmm1
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm4 = <128,128,128,2,8,14,128,128,0,6,12,u,u,u,u,u>
+; AVX512BW-NEXT:    vextracti128 $1, %ymm9, %xmm12
+; AVX512BW-NEXT:    vpshufb %xmm4, %xmm12, %xmm3
+; AVX512BW-NEXT:    vpor %xmm1, %xmm3, %xmm5
+; AVX512BW-NEXT:    vmovdqa (%rdi), %ymm10
+; AVX512BW-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512BW-NEXT:    vmovdqa 64(%rdi), %ymm6
+; AVX512BW-NEXT:    vmovdqa64 128(%rdi), %ymm26
+; AVX512BW-NEXT:    vmovdqa 160(%rdi), %ymm1
+; AVX512BW-NEXT:    vpblendmw %ymm26, %ymm1, %ymm15 {%k1}
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm15, %xmm16
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,u,128,128,0,6,12,128,128,128,4,10>
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm16, %xmm11
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <u,u,u,u,u,u,4,10,128,128,128,2,8,14,128,128>
+; AVX512BW-NEXT:    vpshufb %xmm18, %xmm15, %xmm13
+; AVX512BW-NEXT:    vpor %xmm11, %xmm13, %xmm11
+; AVX512BW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm5, %zmm11, %zmm11
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm5 = ymm6[2,3],mem[2,3]
+; AVX512BW-NEXT:    vinserti128 $1, 96(%rdi), %ymm6, %ymm13
+; AVX512BW-NEXT:    movw $-28124, %r10w # imm = 0x9224
+; AVX512BW-NEXT:    kmovd %r10d, %k4
+; AVX512BW-NEXT:    vpblendmw %ymm5, %ymm13, %ymm19 {%k4}
+; AVX512BW-NEXT:    vpblendmw %ymm3, %ymm10, %ymm20 {%k1}
+; AVX512BW-NEXT:    vpshufb %xmm2, %xmm20, %xmm2
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm20, %xmm21
+; AVX512BW-NEXT:    vpshufb %xmm4, %xmm21, %xmm4
+; AVX512BW-NEXT:    vpor %xmm2, %xmm4, %xmm2
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,2,8,14,4,10,0,6,12,2,8,14,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT:    movl $4192256, %r10d # imm = 0x3FF800
+; AVX512BW-NEXT:    kmovd %r10d, %k2
+; AVX512BW-NEXT:    vpshufb %ymm6, %ymm19, %ymm2 {%k2}
+; AVX512BW-NEXT:    vmovdqu16 %zmm11, %zmm2 {%k2}
+; AVX512BW-NEXT:    vmovdqa 256(%rdi), %ymm11
+; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm4 = ymm11[2,3],mem[2,3]
+; AVX512BW-NEXT:    vinserti128 $1, 288(%rdi), %ymm11, %ymm14
+; AVX512BW-NEXT:    vpblendmw %ymm4, %ymm14, %ymm22 {%k4}
+; AVX512BW-NEXT:    vpshufb %ymm6, %ymm22, %ymm7
+; AVX512BW-NEXT:    vmovdqa 320(%rdi), %ymm11
+; AVX512BW-NEXT:    vmovdqa 352(%rdi), %ymm6
+; AVX512BW-NEXT:    vpblendmw %ymm11, %ymm6, %ymm24 {%k1}
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm24, %xmm25
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm25, %xmm17
+; AVX512BW-NEXT:    vpshufb %xmm18, %xmm24, %xmm18
+; AVX512BW-NEXT:    vporq %xmm17, %xmm18, %xmm17
+; AVX512BW-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm8
+; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512BW-NEXT:    movabsq $-8796093022208, %rdi # imm = 0xFFFFF80000000000
+; AVX512BW-NEXT:    kmovq %rdi, %k3
+; AVX512BW-NEXT:    vmovdqu8 %zmm7, %zmm2 {%k3}
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm7 = <1,7,13,128,128,128,5,11,128,128,128,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %xmm7, %xmm9, %xmm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm9 = <128,128,128,3,9,15,128,128,1,7,13,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %xmm9, %xmm12, %xmm12
+; AVX512BW-NEXT:    vpor %xmm8, %xmm12, %xmm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,u,128,128,1,7,13,128,128,128,5,11>
+; AVX512BW-NEXT:    vpshufb %xmm12, %xmm16, %xmm16
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,u,5,11,128,128,128,3,9,15,128,128>
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm15, %xmm15
+; AVX512BW-NEXT:    vporq %xmm16, %xmm15, %xmm15
+; AVX512BW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm8, %zmm15, %zmm8
+; AVX512BW-NEXT:    vpshufb %xmm7, %xmm20, %xmm7
+; AVX512BW-NEXT:    vpshufb %xmm9, %xmm21, %xmm9
+; AVX512BW-NEXT:    vpor %xmm7, %xmm9, %xmm9
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,3,9,15,5,11,1,7,13,3,9,15,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %ymm7, %ymm19, %ymm9 {%k2}
+; AVX512BW-NEXT:    vmovdqu16 %zmm8, %zmm9 {%k2}
+; AVX512BW-NEXT:    vpshufb %ymm7, %ymm22, %ymm7
+; AVX512BW-NEXT:    vpshufb %xmm12, %xmm25, %xmm8
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm24, %xmm12
+; AVX512BW-NEXT:    vpor %xmm8, %xmm12, %xmm8
+; AVX512BW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512BW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm7[0,1,2],ymm8[3,4,5,6,7],ymm7[8,9,10],ymm8[11,12,13,14,15]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512BW-NEXT:    vmovdqu8 %zmm7, %zmm9 {%k3}
+; AVX512BW-NEXT:    vpblendmw %ymm13, %ymm5, %ymm15 {%k4}
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm7 = <128,128,128,4,10,128,128,128,2,8,14,u,u,u,u,u>
+; AVX512BW-NEXT:    movw $9362, %di # imm = 0x2492
+; AVX512BW-NEXT:    kmovd %edi, %k2
+; AVX512BW-NEXT:    vpblendmw %ymm10, %ymm3, %ymm8 {%k2}
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm8, %xmm16
+; AVX512BW-NEXT:    vpshufb %xmm7, %xmm16, %xmm12
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <2,8,14,128,128,0,6,12,128,128,128,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm8, %xmm18
+; AVX512BW-NEXT:    vporq %xmm12, %xmm18, %xmm18
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} ymm19 = <u,u,u,u,u,u,u,u,u,u,u,4,10,0,6,12,2,8,14,4,10,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT:    movl $2095104, %edi # imm = 0x1FF800
+; AVX512BW-NEXT:    kmovd %edi, %k5
+; AVX512BW-NEXT:    vpshufb %ymm19, %ymm15, %ymm18 {%k5}
+; AVX512BW-NEXT:    vpblendmw %ymm23, %ymm0, %ymm20 {%k2}
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm20, %xmm21
+; AVX512BW-NEXT:    vpshufb %xmm7, %xmm21, %xmm7
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm20, %xmm12
+; AVX512BW-NEXT:    vpor %xmm7, %xmm12, %xmm7
+; AVX512BW-NEXT:    vpblendmw %ymm1, %ymm26, %ymm17 {%k1}
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm22 = <u,u,u,u,u,0,6,12,128,128,128,4,10,128,128,128>
+; AVX512BW-NEXT:    vpshufb %xmm22, %xmm17, %xmm12
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm17, %xmm24
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm25 = <u,u,u,u,u,128,128,128,2,8,14,128,128,0,6,12>
+; AVX512BW-NEXT:    vpshufb %xmm25, %xmm24, %xmm27
+; AVX512BW-NEXT:    vporq %xmm12, %xmm27, %xmm12
+; AVX512BW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm7, %zmm12, %zmm12
+; AVX512BW-NEXT:    movl $2097151, %edi # imm = 0x1FFFFF
+; AVX512BW-NEXT:    kmovq %rdi, %k6
+; AVX512BW-NEXT:    vmovdqu8 %zmm18, %zmm12 {%k6}
+; AVX512BW-NEXT:    vpblendmw %ymm14, %ymm4, %ymm7 {%k4}
+; AVX512BW-NEXT:    vpblendmw %ymm6, %ymm11, %ymm18 {%k1}
+; AVX512BW-NEXT:    vpshufb %xmm22, %xmm18, %xmm22
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm18, %xmm27
+; AVX512BW-NEXT:    vpshufb %xmm25, %xmm27, %xmm25
+; AVX512BW-NEXT:    vporq %xmm22, %xmm25, %xmm22
+; AVX512BW-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm22
+; AVX512BW-NEXT:    vpshufb %ymm19, %ymm7, %ymm22 {%k5}
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm22, %zmm0, %zmm19
+; AVX512BW-NEXT:    vmovdqu8 %zmm19, %zmm12 {%k3}
+; AVX512BW-NEXT:    movw $9289, %di # imm = 0x2449
+; AVX512BW-NEXT:    kmovd %edi, %k4
+; AVX512BW-NEXT:    vmovdqu16 %ymm14, %ymm4 {%k4}
+; AVX512BW-NEXT:    vmovdqu16 %ymm13, %ymm5 {%k4}
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm13 = <128,128,128,5,11,128,128,128,3,9,15,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %xmm13, %xmm16, %xmm14
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm16 = <3,9,15,128,128,1,7,13,128,128,128,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %xmm16, %xmm8, %xmm8
+; AVX512BW-NEXT:    vpor %xmm14, %xmm8, %xmm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,u,u,u,5,11,1,7,13,3,9,15,5,11,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %ymm14, %ymm15, %ymm8 {%k5}
+; AVX512BW-NEXT:    vpshufb %xmm13, %xmm21, %xmm13
+; AVX512BW-NEXT:    vpshufb %xmm16, %xmm20, %xmm15
+; AVX512BW-NEXT:    vpor %xmm13, %xmm15, %xmm13
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,u,u,u,1,7,13,128,128,128,5,11,128,128,128>
+; AVX512BW-NEXT:    vpshufb %xmm15, %xmm17, %xmm16
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,128,128,128,3,9,15,128,128,1,7,13>
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm24, %xmm19
+; AVX512BW-NEXT:    vporq %xmm16, %xmm19, %xmm16
+; AVX512BW-NEXT:    vinserti32x4 $1, %xmm16, %ymm0, %ymm16
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm13, %zmm16, %zmm13
+; AVX512BW-NEXT:    vmovdqu8 %zmm8, %zmm13 {%k6}
+; AVX512BW-NEXT:    vpshufb %xmm15, %xmm18, %xmm8
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm27, %xmm15
+; AVX512BW-NEXT:    vpor %xmm8, %xmm15, %xmm8
+; AVX512BW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512BW-NEXT:    vpshufb %ymm14, %ymm7, %ymm8 {%k5}
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm7
+; AVX512BW-NEXT:    vmovdqu8 %zmm7, %zmm13 {%k3}
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,0,6,12,2,8,14,4,10,0,6,12,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %ymm7, %ymm5, %ymm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,128,0,6,12,128,128,128,4,10,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vmovdqu16 %ymm10, %ymm3 {%k1}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm3, %xmm15
+; AVX512BW-NEXT:    vpshufb %xmm14, %xmm15, %xmm10
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm16 = <4,10,128,128,128,2,8,14,128,128,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %xmm16, %xmm3, %xmm17
+; AVX512BW-NEXT:    vporq %xmm10, %xmm17, %xmm10
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3,4],xmm8[5,6,7]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm8[4,5,6,7]
+; AVX512BW-NEXT:    vmovdqu16 %ymm23, %ymm0 {%k1}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm8
+; AVX512BW-NEXT:    vpshufb %xmm14, %xmm8, %xmm14
+; AVX512BW-NEXT:    vpshufb %xmm16, %xmm0, %xmm16
+; AVX512BW-NEXT:    vporq %xmm14, %xmm16, %xmm14
+; AVX512BW-NEXT:    vmovdqu16 %ymm26, %ymm1 {%k2}
+; AVX512BW-NEXT:    vextracti32x4 $1, %ymm1, %xmm16
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,u,128,128,128,4,10,128,128,128,2,8,14>
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm16, %xmm18
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <u,u,u,u,u,2,8,14,128,128,0,6,12,128,128,128>
+; AVX512BW-NEXT:    vpshufb %xmm19, %xmm1, %xmm20
+; AVX512BW-NEXT:    vporq %xmm18, %xmm20, %xmm18
+; AVX512BW-NEXT:    vinserti32x4 $1, %xmm18, %ymm0, %ymm18
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm14, %zmm18, %zmm14
+; AVX512BW-NEXT:    movabsq $4398044413952, %rdi # imm = 0x3FFFFE00000
+; AVX512BW-NEXT:    kmovq %rdi, %k1
+; AVX512BW-NEXT:    vmovdqu8 %zmm14, %zmm10 {%k1}
+; AVX512BW-NEXT:    vpshufb %ymm7, %ymm4, %ymm7
+; AVX512BW-NEXT:    vmovdqu16 %ymm11, %ymm6 {%k2}
+; AVX512BW-NEXT:    vextracti128 $1, %ymm6, %xmm11
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm11, %xmm14
+; AVX512BW-NEXT:    vpshufb %xmm19, %xmm6, %xmm17
+; AVX512BW-NEXT:    vporq %xmm14, %xmm17, %xmm14
+; AVX512BW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512BW-NEXT:    movl $-2097152, %edi # imm = 0xFFE00000
+; AVX512BW-NEXT:    kmovd %edi, %k2
+; AVX512BW-NEXT:    vmovdqu8 %ymm14, %ymm7 {%k2}
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512BW-NEXT:    vmovdqu16 %zmm7, %zmm10 {%k2}
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,1,7,13,3,9,15,5,11,1,7,13,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %ymm7, %ymm5, %ymm5
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,128,1,7,13,128,128,128,5,11,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %xmm14, %xmm15, %xmm15
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <5,11,128,128,128,3,9,15,128,128,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm3, %xmm3
+; AVX512BW-NEXT:    vpor %xmm3, %xmm15, %xmm3
+; AVX512BW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm5[5,6,7]
+; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-NEXT:    vpshufb %xmm14, %xmm8, %xmm5
+; AVX512BW-NEXT:    vpshufb %xmm17, %xmm0, %xmm0
+; AVX512BW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,128,128,128,5,11,128,128,128,3,9,15>
+; AVX512BW-NEXT:    vpshufb %xmm5, %xmm16, %xmm8
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,u,u,3,9,15,128,128,1,7,13,128,128,128>
+; AVX512BW-NEXT:    vpshufb %xmm14, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpor %xmm1, %xmm8, %xmm1
+; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqu8 %zmm0, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpshufb %ymm7, %ymm4, %ymm0
+; AVX512BW-NEXT:    vpshufb %xmm5, %xmm11, %xmm1
+; AVX512BW-NEXT:    vpshufb %xmm14, %xmm6, %xmm4
+; AVX512BW-NEXT:    vpor %xmm1, %xmm4, %xmm1
+; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k2}
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512BW-NEXT:    vmovdqu16 %zmm0, %zmm3 {%k2}
+; AVX512BW-NEXT:    vmovdqa64 %zmm2, (%rsi)
+; AVX512BW-NEXT:    vmovdqa64 %zmm9, (%rdx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm12, (%rcx)
+; AVX512BW-NEXT:    vmovdqa64 %zmm13, (%r8)
+; AVX512BW-NEXT:    vmovdqa64 %zmm10, (%r9)
+; AVX512BW-NEXT:    vmovdqa64 %zmm3, (%rax)
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
   %wide.vec = load <384 x i8>, ptr %in.vec, align 64
   %strided.vec0 = shufflevector <384 x i8> %wide.vec, <384 x i8> poison, <64 x i32> <i32 0, i32 6, i32 12, i32 18, i32 24, i32 30, i32 36, i32 42, i32 48, i32 54, i32 60, i32 66, i32 72, i32 78, i32 84, i32 90, i32 96, i32 102, i32 108, i32 114, i32 120, i32 126, i32 132, i32 138, i32 144, i32 150, i32 156, i32 162, i32 168, i32 174, i32 180, i32 186, i32 192, i32 198, i32 204, i32 210, i32 216, i32 222, i32 228, i32 234, i32 240, i32 246, i32 252, i32 258, i32 264, i32 270, i32 276, i32 282, i32 288, i32 294, i32 300, i32 306, i32 312, i32 318, i32 324, i32 330, i32 336, i32 342, i32 348, i32 354, i32 360, i32 366, i32 372, i32 378>
   %strided.vec1 = shufflevector <384 x i8> %wide.vec, <384 x i8> poison, <64 x i32> <i32 1, i32 7, i32 13, i32 19, i32 25, i32 31, i32 37, i32 43, i32 49, i32 55, i32 61, i32 67, i32 73, i32 79, i32 85, i32 91, i32 97, i32 103, i32 109, i32 115, i32 121, i32 127, i32 133, i32 139, i32 145, i32 151, i32 157, i32 163, i32 169, i32 175, i32 181, i32 187, i32 193, i32 199, i32 205, i32 211, i32 217, i32 223, i32 229, i32 235, i32 241, i32 247, i32 253, i32 259, i32 265, i32 271, i32 277, i32 283, i32 289, i32 295, i32 301, i32 307, i32 313, i32 319, i32 325, i32 331, i32 337, i32 343, i32 349, i32 355, i32 361, i32 367, i32 373, i32 379>
@@ -5308,8 +4970,10 @@ define void @load_i8_stride6_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-FAST-PERLANE: {{.*}}
 ; AVX2-SLOW: {{.*}}
 ; AVX512: {{.*}}
+; AVX512BW-FAST: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
+; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
index 7787c084819ac..40686eae642d8 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
@@ -3708,198 +3708,203 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vmovdqa64 128(%rdi), %ymm17
-; AVX512F-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm3
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm2, %ymm1
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[6,13,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm2, %ymm3, %ymm1
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13,u,u,u,u]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm3, %xmm1, %xmm12
+; AVX512F-SLOW-NEXT:    vpor %xmm4, %xmm1, %xmm1
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm4, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm20
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm4, %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm20
 ; AVX512F-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm5[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [0,1,2,11,0,1,2,11]
-; AVX512F-SLOW-NEXT:    # ymm15 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm15, %ymm12
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm5[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm1[0,1,2,3,4,5,6],ymm6[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm6
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm7
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, %ymm8
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm7, %ymm6, %ymm8
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[5,12],zero,zero,xmm9[1,8,15,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[0,7,14],zero,zero,xmm8[3,10],zero,zero,zero,xmm8[u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm9, %xmm8, %xmm13
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm14, %ymm9
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm7, %ymm6, %ymm9
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm10
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm10[5,12],zero,zero,xmm10[1,8,15,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[0,7,14],zero,zero,xmm9[3,10],zero,zero,zero,xmm9[u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpor %xmm10, %xmm9, %xmm13
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
 ; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm9
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm11, %ymm8
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm3, %ymm9, %ymm8
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm11, %ymm15
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm1, %ymm9, %ymm15
 ; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm10
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0,1],ymm10[2],ymm8[3,4],ymm10[5],ymm8[6,7,8,9],ymm10[10],ymm8[11,12],ymm10[13],ymm8[14,15]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm15[0,1],ymm10[2],ymm15[3,4],ymm10[5],ymm15[6,7,8,9],ymm10[10],ymm15[11,12],ymm10[13],ymm15[14,15]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm13, %ymm8
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm12, %ymm16, %ymm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm19
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm18
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm11, %ymm12
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm2, %ymm12
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm2, %ymm3, %ymm12
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm12[u,u,u,6,13],zero,zero,xmm12[2,9],zero,zero,zero,xmm12[u,u,u,u]
 ; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm12
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u],zero,zero,xmm12[4,11],zero,zero,xmm12[0,7,14,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vporq %xmm13, %xmm12, %xmm18
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm5[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm4[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm12, %ymm15, %ymm18
+; AVX512F-SLOW-NEXT:    vpor %xmm13, %xmm12, %xmm12
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm5[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm4[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm13[0],xmm8[1],xmm13[1],xmm8[2],xmm13[2],xmm8[3],xmm13[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm12[0,1,2,3,4,5,6],ymm8[7]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm13, %ymm12
 ; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm7, %ymm6, %ymm12
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm14
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = zero,zero,zero,xmm14[6,13],zero,zero,xmm14[2,9,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm15
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm15[6,13],zero,zero,xmm15[2,9,u,u,u,u,u,u,u]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[1,8,15],zero,zero,xmm12[4,11],zero,zero,xmm12[u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm14, %xmm12, %xmm14
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, %ymm12
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm9, %ymm3, %ymm12
+; AVX512F-SLOW-NEXT:    vpor %xmm15, %xmm12, %xmm15
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm14, %ymm12
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm9, %ymm1, %ymm12
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm10[2],ymm12[3,4,5],ymm10[6],ymm12[7,8,9],ymm10[10],ymm12[11,12,13],ymm10[14],ymm12[15]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm12[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm14, %ymm12
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm18, %ymm16, %ymm12
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm2, %ymm17, %ymm1
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm14
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u],zero,zero,zero,xmm14[5,12],zero,zero,xmm14[1,8,15,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,0,7,14],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm1, %xmm14, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm5, %xmm14
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm4[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm8, %ymm15, %ymm1
+; AVX512F-SLOW-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm12
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm8, %ymm16, %ymm12
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm19
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm8
 ; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm7, %ymm6, %ymm8
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm8[2,9],zero,zero,zero,xmm8[5,12],zero,zero,xmm8[u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm8[2,9],zero,zero,zero,xmm8[5,12],zero,zero,xmm8[u,u,u,u,u,u,u]
 ; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm8
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[0,7,14],zero,zero,xmm8[3,10,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm14, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm13, %ymm14
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm9, %ymm3, %ymm14
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm14[0,1,2],ymm10[3],ymm14[4,5],ymm10[6],ymm14[7,8,9,10],ymm10[11],ymm14[12,13],ymm10[14],ymm14[15]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm8, %ymm14
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm14[0],ymm1[1,2,3,4,5,6,7],ymm14[8],ymm1[9,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm14[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm20
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm13, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm2, %ymm17, %ymm1
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u],zero,zero,zero,xmm8[6,13],zero,zero,xmm8[2,9,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,1,8,15],zero,zero,xmm1[4,11],zero,zero,xmm1[u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm1, %xmm8, %xmm1
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[5,12]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm4[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
+; AVX512F-SLOW-NEXT:    vpor %xmm15, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm13, %ymm15
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm9, %ymm1, %ymm15
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm15[0,1,2],ymm10[3],ymm15[4,5],ymm10[6],ymm15[7,8,9,10],ymm10[11],ymm15[12,13],ymm10[14],ymm15[15]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm17, %ymm8, %ymm15
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm3, %ymm2, %ymm14
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u],zero,zero,zero,xmm8[5,12],zero,zero,xmm8[1,8,15,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,0,7,14],zero,zero,xmm14[3,10],zero,zero,zero,xmm14[u,u,u,u]
 ; AVX512F-SLOW-NEXT:    vpor %xmm8, %xmm14, %xmm8
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm12
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm5, %xmm14
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm4[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm12[0],xmm14[0],xmm12[1],xmm14[1],xmm12[2],xmm14[2],xmm12[3],xmm14[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6],ymm12[7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm15[0],ymm8[1,2,3,4,5,6,7],ymm15[8],ymm8[9,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm15[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm20
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm13, %ymm8
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm3, %ymm2, %ymm8
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm12
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[2,9,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,1,8,15],zero,zero,xmm8[4,11],zero,zero,xmm8[u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpor %xmm12, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[5,12]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm4[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
+; AVX512F-SLOW-NEXT:    vpor %xmm12, %xmm14, %xmm12
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm16, %ymm8
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm11, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm7, %ymm6, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm1[3,10],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,8,15],zero,zero,xmm1[4,11,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm1, %xmm14, %xmm1
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm8, %ymm16, %ymm12
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm11, %ymm8
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm7, %ymm6, %ymm8
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm8[3,10],zero,zero,zero,xmm8[6,13],zero,zero,xmm8[u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[1,8,15],zero,zero,xmm8[4,11,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpor %xmm14, %xmm8, %xmm8
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm14
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm9, %ymm3, %ymm14
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm9, %ymm1, %ymm14
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm10[0],ymm14[1,2],ymm10[3],ymm14[4,5,6],ymm10[7,8],ymm14[9,10],ymm10[11],ymm14[12,13,14],ymm10[15]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm1, %ymm14
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm14[0],ymm8[1,2,3,4,5,6,7],ymm14[8],ymm8[9,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm14[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm21
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm2, %ymm17, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm1[u,u,2,9],zero,zero,zero,xmm1[5,12],zero,zero,xmm1[u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u],zero,zero,xmm1[0,7,14],zero,zero,xmm1[3,10,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm1, %xmm8, %xmm1
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[6,13]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm4[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
-; AVX512F-SLOW-NEXT:    vpor %xmm8, %xmm14, %xmm8
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm17, %ymm8, %ymm14
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm14[0],ymm12[1,2,3,4,5,6,7],ymm14[8],ymm12[9,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm21
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm3, %ymm2, %ymm8
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm8[u,u,2,9],zero,zero,zero,xmm8[5,12],zero,zero,xmm8[u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u],zero,zero,xmm8[0,7,14],zero,zero,xmm8[3,10,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpor %xmm12, %xmm8, %xmm8
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm16, %ymm8
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm13, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm6, %ymm7, %ymm1
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm14
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[6,13]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm4[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
+; AVX512F-SLOW-NEXT:    vpor %xmm12, %xmm14, %xmm12
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm8, %ymm16, %ymm12
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm13, %ymm8
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm6, %ymm7, %ymm8
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm14
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = zero,zero,xmm14[2,9],zero,zero,zero,xmm14[5,12,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[4,11],zero,zero,xmm1[0,7,14],zero,zero,xmm1[u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm1, %xmm14, %xmm1
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[4,11],zero,zero,xmm8[0,7,14],zero,zero,xmm8[u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpor %xmm14, %xmm8, %xmm8
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm11, %ymm14
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm9, %ymm3, %ymm14
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm9, %ymm1, %ymm14
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm10[0],ymm14[1,2,3],ymm10[4],ymm14[5,6],ymm10[7,8],ymm14[9,10,11],ymm10[12],ymm14[13,14],ymm10[15]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[3,10,1,8,15,6,13,20,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm1, %ymm14
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm14[0],ymm8[1,2,3,4,5,6,7],ymm14[8],ymm8[9,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm11, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm2, %ymm17, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm1[u,u,3,10],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u],zero,zero,xmm1[1,8,15],zero,zero,xmm1[4,11,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm1, %xmm8, %xmm1
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm4[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,7,14]
-; AVX512F-SLOW-NEXT:    vpor %xmm8, %xmm15, %xmm8
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm17, %ymm8, %ymm14
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm14[0],ymm12[1,2,3,4,5,6,7],ymm14[8],ymm12[9,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm11, %ymm8
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm3, %ymm2, %ymm8
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm8[u,u,3,10],zero,zero,zero,xmm8[6,13],zero,zero,xmm8[u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u],zero,zero,xmm8[1,8,15],zero,zero,xmm8[4,11,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpor %xmm12, %xmm8, %xmm8
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm16, %ymm8
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm6, %ymm7, %ymm1
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm15
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm4[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,7,14]
+; AVX512F-SLOW-NEXT:    vpor %xmm12, %xmm15, %xmm12
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm8, %ymm16, %ymm12
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm6, %ymm7, %ymm8
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm15
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,xmm15[3,10],zero,zero,zero,xmm15[6,13,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm1, %xmm15, %xmm1
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[5,12],zero,zero,xmm8[1,8,15],zero,zero,xmm8[u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpor %xmm15, %xmm8, %xmm8
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm13, %ymm15
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm3, %ymm9, %ymm15
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm1, %ymm9, %ymm15
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm15[0],ymm10[1],ymm15[2,3],ymm10[4],ymm15[5,6,7,8],ymm10[9],ymm15[10,11],ymm10[12],ymm15[13,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm1, %ymm15
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm15[0],ymm8[1,2,3,4,5,6,7],ymm15[8],ymm8[9,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm15[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm2, %ymm13
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm17, %ymm8, %ymm15
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm15[0],ymm12[1,2,3,4,5,6,7],ymm15[8],ymm12[9,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm15[0,1,2,3],ymm8[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm2, %ymm3, %ymm13
 ; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm2
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm13[u,u,4,11],zero,zero,xmm13[0,7,14],zero,zero,xmm13[u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm2, %xmm8, %xmm2
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm13[u,u,4,11],zero,zero,xmm13[0,7,14],zero,zero,xmm13[u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpor %xmm2, %xmm3, %xmm2
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,8,15]
-; AVX512F-SLOW-NEXT:    vpor %xmm4, %xmm5, %xmm4
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm16, %ymm4
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm4[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,8,15]
+; AVX512F-SLOW-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm16, %ymm3
 ; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm6, %ymm7, %ymm11
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm11[6,13],zero,zero,xmm11[2,9],zero,zero,zero,xmm11[u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,xmm5[4,11],zero,zero,xmm5[0,7,14,u,u,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm2, %xmm5, %xmm2
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm3, %ymm9, %ymm0
+; AVX512F-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[4,11],zero,zero,xmm4[0,7,14,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpor %xmm2, %xmm4, %xmm2
+; AVX512F-SLOW-NEXT:    vpternlogq $202, %ymm1, %ymm9, %ymm0
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm10[1],ymm0[2,3,4],ymm10[5],ymm0[6,7,8],ymm10[9],ymm0[10,11,12],ymm10[13],ymm0[14,15]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[5,12,3,10,1,8,15,22,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm2, %ymm0
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0],ymm4[1,2,3,4,5,6,7],ymm0[8],ymm4[9,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm19, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm12, (%rdx)
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm17, %ymm2, %ymm0
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm0[0],ymm3[1,2,3,4,5,6,7],ymm0[8],ymm3[9,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm18, (%rsi)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm19, (%rdx)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, (%rcx)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm21, (%r8)
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm14, (%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, (%r10)
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm8, (%r10)
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, (%rax)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
@@ -3916,19 +3921,18 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %xmm4, %xmm1, %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,1,2,4,6>
+; AVX512F-FAST-NEXT:    vpor %xmm4, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,1,2,4,6>
 ; AVX512F-FAST-NEXT:    vmovdqa 192(%rdi), %ymm12
-; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,23,26,29]
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [0,1,2,15,0,1,2,15]
-; AVX512F-FAST-NEXT:    # ymm13 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermt2d %ymm1, %ymm13, %ymm6
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
+; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm4, %ymm4
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,23,26,29]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm1[0,1,2,3,4,5,6],ymm4[7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm4
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm5
 ; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa %ymm14, %ymm7
+; AVX512F-FAST-NEXT:    vmovdqa %ymm13, %ymm7
 ; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm5, %ymm4, %ymm7
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm8
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm8[5,12],zero,zero,xmm8[1,8,15,u,u,u,u,u,u]
@@ -3951,46 +3955,48 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm6
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u],zero,zero,xmm6[4,11],zero,zero,xmm6[0,7,14,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpor %xmm6, %xmm10, %xmm6
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,1,3,4,6>
 ; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm10, %ymm10
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,17,20,27,30]
-; AVX512F-FAST-NEXT:    vpermt2d %ymm10, %ymm13, %ymm6
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm10[7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm11, %ymm10
 ; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm5, %ymm4, %ymm10
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm15
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm15[6,13],zero,zero,xmm15[2,9,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm14
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = zero,zero,zero,xmm14[6,13],zero,zero,xmm14[2,9,u,u,u,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[1,8,15],zero,zero,xmm10[4,11],zero,zero,xmm10[u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %xmm15, %xmm10, %xmm15
-; AVX512F-FAST-NEXT:    vmovdqa %ymm14, %ymm10
+; AVX512F-FAST-NEXT:    vpor %xmm14, %xmm10, %xmm14
+; AVX512F-FAST-NEXT:    vmovdqa %ymm13, %ymm10
 ; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm7, %ymm1, %ymm10
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm8[2],ymm10[3,4,5],ymm8[6],ymm10[7,8,9],ymm8[10],ymm10[11,12,13],ymm8[14],ymm10[15]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm10
+; AVX512F-FAST-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm14, %ymm10
 ; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm6, %ymm16, %ymm10
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm3, %ymm2, %ymm14
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm6
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,zero,xmm6[5,12],zero,zero,xmm6[1,8,15,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,0,7,14],zero,zero,xmm14[3,10],zero,zero,zero,xmm14[u,u,u,u]
+; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm6
+; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm5, %ymm4, %ymm6
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm6[2,9],zero,zero,zero,xmm6[5,12],zero,zero,xmm6[u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm6
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,xmm6[0,7,14],zero,zero,xmm6[3,10,u,u,u,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpor %xmm6, %xmm14, %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,1,3,5,6>
-; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm14, %ymm12
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,21,24,31]
-; AVX512F-FAST-NEXT:    vpermt2d %ymm12, %ymm13, %ymm6
-; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm12
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm5, %ymm4, %ymm12
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm12[2,9],zero,zero,zero,xmm12[5,12],zero,zero,xmm12[u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm12
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = zero,zero,xmm12[0,7,14],zero,zero,xmm12[3,10,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %xmm13, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vmovdqa %ymm11, %ymm13
-; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm7, %ymm1, %ymm13
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0,1,2],ymm8[3],ymm13[4,5],ymm8[6],ymm13[7,8,9,10],ymm8[11],ymm13[12,13],ymm8[14],ymm13[15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm13[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-FAST-NEXT:    vmovdqa %ymm11, %ymm14
+; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm7, %ymm1, %ymm14
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm14[0,1,2],ymm8[3],ymm14[4,5],ymm8[6],ymm14[7,8,9,10],ymm8[11],ymm14[12,13],ymm8[14],ymm14[15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm14[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm17, %ymm12, %ymm13
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm13[0],ymm6[1,2,3,4,5,6,7],ymm13[8],ymm6[9,10,11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm13[0,1,2,3],ymm6[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm17, %ymm6, %ymm14
+; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm3, %ymm2, %ymm13
+; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm6
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,zero,xmm6[5,12],zero,zero,xmm6[1,8,15,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,0,7,14],zero,zero,xmm13[3,10],zero,zero,zero,xmm13[u,u,u,u]
+; AVX512F-FAST-NEXT:    vpor %xmm6, %xmm13, %xmm6
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,1,3,5,6>
+; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm13, %ymm12
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,21,24,31]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm12[7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm14[0],ymm6[1,2,3,4,5,6,7],ymm14[8],ymm6[9,10,11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm14[0,1,2,3],ymm6[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm6, %ymm19
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm11, %ymm6
 ; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm3, %ymm2, %ymm6
@@ -4000,13 +4006,13 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-FAST-NEXT:    vpor %xmm6, %xmm12, %xmm6
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
 ; AVX512F-FAST-NEXT:    vmovdqa 208(%rdi), %xmm14
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm14[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm14[5,12]
-; AVX512F-FAST-NEXT:    vmovdqa 192(%rdi), %xmm13
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm13[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
-; AVX512F-FAST-NEXT:    vpor %xmm12, %xmm15, %xmm12
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm14[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm14[5,12]
+; AVX512F-FAST-NEXT:    vmovdqa 192(%rdi), %xmm12
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm12[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
+; AVX512F-FAST-NEXT:    vpor %xmm13, %xmm15, %xmm13
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm16, %ymm12
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm16, %ymm13
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm9, %ymm6
 ; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm5, %ymm4, %ymm6
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm6[3,10],zero,zero,zero,xmm6[6,13],zero,zero,xmm6[u,u,u,u,u,u,u]
@@ -4018,21 +4024,21 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm15 = ymm8[0],ymm15[1,2],ymm8[3],ymm15[4,5,6],ymm8[7,8],ymm15[9,10],ymm8[11],ymm15[12,13,14],ymm8[15]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm17, %ymm6, %ymm15
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm15[0],ymm12[1,2,3,4,5,6,7],ymm15[8],ymm12[9,10,11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm15[0],ymm13[1,2,3,4,5,6,7],ymm15[8],ymm13[9,10,11,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm15[0,1,2,3],ymm6[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm6, %ymm20
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm6
 ; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm3, %ymm2, %ymm6
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm6[u,u,2,9],zero,zero,zero,xmm6[5,12],zero,zero,xmm6[u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm6[u,u,2,9],zero,zero,zero,xmm6[5,12],zero,zero,xmm6[u,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm6
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[0,7,14],zero,zero,xmm6[3,10,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %xmm6, %xmm12, %xmm6
+; AVX512F-FAST-NEXT:    vpor %xmm6, %xmm13, %xmm6
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm14[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm14[6,13]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm13[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
-; AVX512F-FAST-NEXT:    vpor %xmm12, %xmm15, %xmm12
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm16, %ymm12
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm14[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm14[6,13]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm12[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
+; AVX512F-FAST-NEXT:    vpor %xmm13, %xmm15, %xmm13
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm16, %ymm13
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm11, %ymm6
 ; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm4, %ymm5, %ymm6
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm15
@@ -4044,7 +4050,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm15 = ymm8[0],ymm15[1,2,3],ymm8[4],ymm15[5,6],ymm8[7,8],ymm15[9,10,11],ymm8[12],ymm15[13,14],ymm8[15]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[3,10,1,8,15,6,13,20,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm17, %ymm6, %ymm15
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm15[0],ymm12[1,2,3,4,5,6,7],ymm15[8],ymm12[9,10,11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm15[0],ymm13[1,2,3,4,5,6,7],ymm15[8],ymm13[9,10,11,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm15[0,1,2,3],ymm6[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm6, %ymm21
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm9, %ymm6
@@ -4054,11 +4060,11 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[1,8,15],zero,zero,xmm6[4,11,u,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpor %xmm6, %xmm15, %xmm6
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm13[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm14[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm14[0,7,14]
-; AVX512F-FAST-NEXT:    vpor %xmm15, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm16, %ymm12
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm12[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm14[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm14[0,7,14]
+; AVX512F-FAST-NEXT:    vpor %xmm15, %xmm13, %xmm13
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm16, %ymm13
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm0, %ymm6
 ; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm4, %ymm5, %ymm6
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm15
@@ -4070,7 +4076,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm15 = ymm15[0],ymm8[1],ymm15[2,3],ymm8[4],ymm15[5,6,7,8],ymm8[9],ymm15[10,11],ymm8[12],ymm15[13,14,15]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm15[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm17, %ymm6, %ymm15
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm15[0],ymm12[1,2,3,4,5,6,7],ymm15[8],ymm12[9,10,11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm15[0],ymm13[1,2,3,4,5,6,7],ymm15[8],ymm13[9,10,11,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpblendd {{.*#+}} ymm6 = ymm15[0,1,2,3],ymm6[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpternlogq $202, %ymm2, %ymm3, %ymm11
 ; AVX512F-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm2
@@ -4078,7 +4084,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm11[u,u,4,11],zero,zero,xmm11[0,7,14],zero,zero,xmm11[u,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpor %xmm2, %xmm3, %xmm2
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm13[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm12[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm14[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm14[1,8,15]
 ; AVX512F-FAST-NEXT:    vpor %xmm3, %xmm11, %xmm3
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
@@ -4109,180 +4115,184 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <16,9,2,19,20,13,6,23,24,u,26,27,28,u,30,31>
-; AVX512BW-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm2
-; AVX512BW-SLOW-NEXT:    vpermw %zmm2, %zmm0, %zmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <16,9,2,19,12,5,22,23,24,u,26,27,u,29,30,31>
-; AVX512BW-SLOW-NEXT:    vpermw %zmm2, %zmm1, %zmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <8,1,2,19,12,5,22,15,u,9,26,11,u,29,14,u>
-; AVX512BW-SLOW-NEXT:    vpermw %zmm2, %zmm3, %zmm9
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <8,1,18,11,4,5,22,15,u,25,10,u,12,29,14,u>
-; AVX512BW-SLOW-NEXT:    vpermw %zmm2, %zmm3, %zmm10
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,1,18,11,4,21,14,7,8,25,10,u,28,13,u,15>
-; AVX512BW-SLOW-NEXT:    vpermw %zmm2, %zmm3, %zmm12
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <0,17,10,3,4,21,14,7,24,9,u,11,28,13,u,31>
-; AVX512BW-SLOW-NEXT:    vpermw %zmm2, %zmm3, %zmm11
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <16,17,10,3,20,13,6,23,24,25,u,27,28,u,30,31>
-; AVX512BW-SLOW-NEXT:    vpermw %zmm2, %zmm3, %zmm15
-; AVX512BW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512BW-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm2
+; AVX512BW-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm1
+; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm0, %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <16,9,2,19,12,5,22,23,24,u,26,27,u,29,30,31>
+; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm2, %zmm4
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <8,1,2,19,12,5,22,15,u,9,26,11,u,29,14,u>
+; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm2, %zmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <8,1,18,11,4,5,22,15,u,25,10,u,12,29,14,u>
+; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm2, %zmm11
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,1,18,11,4,21,14,7,8,25,10,u,28,13,u,15>
+; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm2, %zmm12
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,17,10,3,4,21,14,7,24,9,u,11,28,13,u,31>
+; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm2, %zmm10
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <16,17,10,3,20,13,6,23,24,25,u,27,28,u,30,31>
+; AVX512BW-SLOW-NEXT:    vpermw %zmm1, %zmm2, %zmm6
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %ymm3
+; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm2
+; AVX512BW-SLOW-NEXT:    movw $-28382, %r11w # imm = 0x9122
+; AVX512BW-SLOW-NEXT:    kmovd %r11d, %k5
+; AVX512BW-SLOW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm1 {%k5}
+; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm7
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[5,12],zero,zero,xmm7[1,8,15,u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,7,14],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpor %xmm7, %xmm1, %xmm1
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    movw $992, %r11w # imm = 0x3E0
+; AVX512BW-SLOW-NEXT:    kmovd %r11d, %k1
+; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm6, %ymm1 {%k1}
+; AVX512BW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm7
+; AVX512BW-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm6
 ; AVX512BW-SLOW-NEXT:    movw $8772, %r11w # imm = 0x2244
 ; AVX512BW-SLOW-NEXT:    kmovd %r11d, %k1
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm4 {%k1}
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm5
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[6,13,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,5,12],zero,zero,xmm4[1,8,15],zero,zero,xmm4[u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vporq %xmm5, %xmm4, %xmm16
-; AVX512BW-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm4
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm13, %xmm4, %xmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm7
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
-; AVX512BW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [0,1,2,11,0,1,2,11]
-; AVX512BW-SLOW-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm5, %ymm14, %ymm16
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %ymm8
-; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm6
-; AVX512BW-SLOW-NEXT:    movw $-28382, %di # imm = 0x9122
+; AVX512BW-SLOW-NEXT:    vpblendmw %ymm7, %ymm6, %ymm8 {%k1}
+; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm9
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u],zero,zero,xmm9[3,10],zero,zero,zero,xmm9[6,13,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,5,12],zero,zero,xmm8[1,8,15],zero,zero,xmm8[u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpor %xmm9, %xmm8, %xmm8
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm13
+; AVX512BW-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm8
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm8, %xmm15
+; AVX512BW-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm9
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm9[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm15[0],xmm16[0],xmm15[1],xmm16[1],xmm15[2],xmm16[2],xmm15[3],xmm16[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,6],ymm15[7]
+; AVX512BW-SLOW-NEXT:    movl $-524288, %edi # imm = 0xFFF80000
 ; AVX512BW-SLOW-NEXT:    kmovd %edi, %k4
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm6, %ymm8, %ymm5 {%k4}
-; AVX512BW-SLOW-NEXT:    vextracti32x4 $1, %ymm5, %xmm17
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = zero,zero,zero,xmm17[5,12],zero,zero,xmm17[1,8,15,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,7,14],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vporq %xmm17, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = ymm15[u,u,u,u,u,u,u,u,u,u,6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    movw $992, %di # imm = 0x3E0
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm13, %ymm1 {%k4}
+; AVX512BW-SLOW-NEXT:    movw $4644, %di # imm = 0x1224
 ; AVX512BW-SLOW-NEXT:    kmovd %edi, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm15, %ymm5 {%k2}
-; AVX512BW-SLOW-NEXT:    movl $-524288, %edi # imm = 0xFFF80000
-; AVX512BW-SLOW-NEXT:    kmovd %edi, %k5
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm16, %ymm5 {%k5}
+; AVX512BW-SLOW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm13 {%k2}
+; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm15
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm15[6,13],zero,zero,xmm15[2,9,u,u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[1,8,15],zero,zero,xmm13[4,11],zero,zero,xmm13[u,u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpor %xmm15, %xmm13, %xmm13
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    movl $511, %edi # imm = 0x1FF
+; AVX512BW-SLOW-NEXT:    kmovd %edi, %k3
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm13, %ymm10 {%k3}
 ; AVX512BW-SLOW-NEXT:    movw $9288, %di # imm = 0x2448
-; AVX512BW-SLOW-NEXT:    kmovd %edi, %k2
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm15 {%k2}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm15[u,u,u,6,13],zero,zero,xmm15[2,9],zero,zero,zero,xmm15[u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm15, %xmm15
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm15[u,u,u],zero,zero,xmm15[4,11],zero,zero,xmm15[0,7,14,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vporq %xmm16, %xmm15, %xmm15
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm7[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = xmm4[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm16 = xmm17[0],xmm16[0],xmm17[1],xmm16[1],xmm17[2],xmm16[2],xmm17[3],xmm16[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm16, %ymm14, %ymm15
-; AVX512BW-SLOW-NEXT:    movw $4644, %di # imm = 0x1224
 ; AVX512BW-SLOW-NEXT:    kmovd %edi, %k3
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm6, %ymm8, %ymm16 {%k3}
-; AVX512BW-SLOW-NEXT:    vextracti32x4 $1, %ymm16, %xmm17
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = zero,zero,zero,xmm17[6,13],zero,zero,xmm17[2,9,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm16[1,8,15],zero,zero,xmm16[4,11],zero,zero,xmm16[u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vporq %xmm17, %xmm16, %xmm16
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    movl $511, %edi # imm = 0x1FF
-; AVX512BW-SLOW-NEXT:    kmovd %edi, %k6
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm16, %ymm11 {%k6}
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm15, %ymm11 {%k5}
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm15 {%k4}
-; AVX512BW-SLOW-NEXT:    vextracti32x4 $1, %ymm15, %xmm16
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm16[u,u],zero,zero,zero,xmm16[5,12],zero,zero,xmm16[1,8,15,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm15[u,u,0,7,14],zero,zero,xmm15[3,10],zero,zero,zero,xmm15[u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vporq %xmm16, %xmm15, %xmm15
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm13, %xmm7, %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm4[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm16[0],xmm13[0],xmm16[1],xmm13[1],xmm16[2],xmm13[2],xmm16[3],xmm13[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm13, %ymm14, %ymm15
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm6, %ymm8, %ymm13 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm13[2,9],zero,zero,zero,xmm13[5,12],zero,zero,xmm13[u,u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpblendmw %ymm7, %ymm6, %ymm13 {%k3}
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm13[u,u,u,6,13],zero,zero,xmm13[2,9],zero,zero,zero,xmm13[u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm13
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,u],zero,zero,xmm13[4,11],zero,zero,xmm13[0,7,14,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpor %xmm15, %xmm13, %xmm13
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm9[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm8[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm16[0],xmm15[0],xmm16[1],xmm15[1],xmm16[2],xmm15[2],xmm16[3],xmm15[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,6],ymm15[7]
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm13, %ymm10 {%k4}
+; AVX512BW-SLOW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm13 {%k1}
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm13[2,9],zero,zero,zero,xmm13[5,12],zero,zero,xmm13[u,u,u,u,u,u,u]
 ; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm13
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = zero,zero,xmm13[0,7,14],zero,zero,xmm13[3,10,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm14, %xmm13, %xmm13
+; AVX512BW-SLOW-NEXT:    vpor %xmm15, %xmm13, %xmm13
 ; AVX512BW-SLOW-NEXT:    movl $261632, %edi # imm = 0x3FE00
 ; AVX512BW-SLOW-NEXT:    kmovd %edi, %k4
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 {%k4} = ymm12[u,u,u,u,u,u,u,u,u,1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm13[0],ymm15[1,2,3,4,5,6,7],ymm13[8],ymm15[9,10,11,12,13,14,15]
+; AVX512BW-SLOW-NEXT:    vpblendmw %ymm6, %ymm7, %ymm12 {%k5}
+; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm15
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm15[u,u],zero,zero,zero,xmm15[5,12],zero,zero,xmm15[1,8,15,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,0,7,14],zero,zero,xmm12[3,10],zero,zero,zero,xmm12[u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpor %xmm15, %xmm12, %xmm12
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm9, %xmm14
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm8[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5,6],ymm14[7]
+; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm13[0],ymm12[1,2,3,4,5,6,7],ymm13[8],ymm12[9,10,11,12,13,14,15]
 ; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0,1,2,3],ymm12[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm13 {%k3}
+; AVX512BW-SLOW-NEXT:    vpblendmw %ymm6, %ymm7, %ymm13 {%k2}
 ; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm14
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u],zero,zero,zero,xmm14[6,13],zero,zero,xmm14[2,9,u,u,u,u,u]
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,1,8,15],zero,zero,xmm13[4,11],zero,zero,xmm13[u,u,u,u,u]
 ; AVX512BW-SLOW-NEXT:    vpor %xmm14, %xmm13, %xmm13
 ; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm7[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[5,12]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm4[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm9[5,12]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm8[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
 ; AVX512BW-SLOW-NEXT:    vpor %xmm14, %xmm15, %xmm14
 ; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
 ; AVX512BW-SLOW-NEXT:    movl $-134217728, %edi # imm = 0xF8000000
 ; AVX512BW-SLOW-NEXT:    kmovd %edi, %k5
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm14, %ymm13 {%k5}
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm6, %ymm8, %ymm14 {%k2}
+; AVX512BW-SLOW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm14 {%k3}
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[3,10],zero,zero,zero,xmm14[6,13],zero,zero,xmm14[u,u,u,u,u,u,u]
 ; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm14
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = zero,zero,xmm14[1,8,15],zero,zero,xmm14[4,11,u,u,u,u,u,u,u]
 ; AVX512BW-SLOW-NEXT:    vpor %xmm15, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 {%k4} = ymm10[u,u,u,u,u,u,u,u,u,2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm14[0],ymm13[1,2,3,4,5,6,7],ymm14[8],ymm13[9,10,11,12,13,14,15]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm14[0,1,2,3],ymm10[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm13 {%k1}
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 {%k4} = ymm11[u,u,u,u,u,u,u,u,u,2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm14[0],ymm13[1,2,3,4,5,6,7],ymm14[8],ymm13[9,10,11,12,13,14,15]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm14[0,1,2,3],ymm11[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vpblendmw %ymm6, %ymm7, %ymm13 {%k1}
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm13[u,u,2,9],zero,zero,zero,xmm13[5,12],zero,zero,xmm13[u,u,u,u,u]
 ; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm13
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u],zero,zero,xmm13[0,7,14],zero,zero,xmm13[3,10,u,u,u,u,u]
 ; AVX512BW-SLOW-NEXT:    vpor %xmm14, %xmm13, %xmm13
 ; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm7[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[6,13]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm4[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm9[6,13]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm8[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
 ; AVX512BW-SLOW-NEXT:    vpor %xmm14, %xmm15, %xmm14
 ; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm14, %ymm13 {%k5}
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm8, %ymm6, %ymm14 {%k3}
+; AVX512BW-SLOW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm14 {%k2}
 ; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm15
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,xmm15[2,9],zero,zero,zero,xmm15[5,12,u,u,u,u,u,u,u]
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[4,11],zero,zero,xmm14[0,7,14],zero,zero,xmm14[u,u,u,u,u,u,u]
 ; AVX512BW-SLOW-NEXT:    vpor %xmm15, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm14[0],ymm13[1,2,3,4,5,6,7],ymm14[8],ymm13[9,10,11,12,13,14,15]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm14[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm2, %ymm3, %ymm13 {%k2}
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 {%k4} = ymm5[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm14[0],ymm13[1,2,3,4,5,6,7],ymm14[8],ymm13[9,10,11,12,13,14,15]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm14[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vpblendmw %ymm6, %ymm7, %ymm13 {%k3}
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm13[u,u,3,10],zero,zero,zero,xmm13[6,13],zero,zero,xmm13[u,u,u,u,u]
 ; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm13
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u],zero,zero,xmm13[1,8,15],zero,zero,xmm13[4,11,u,u,u,u,u]
 ; AVX512BW-SLOW-NEXT:    vpor %xmm14, %xmm13, %xmm13
 ; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm4[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm7[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm7[0,7,14]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm8[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm9[0,7,14]
 ; AVX512BW-SLOW-NEXT:    vpor %xmm14, %xmm15, %xmm14
 ; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm14, %ymm13 {%k5}
-; AVX512BW-SLOW-NEXT:    vpblendmw %ymm8, %ymm6, %ymm14 {%k1}
+; AVX512BW-SLOW-NEXT:    vpblendmw %ymm3, %ymm2, %ymm14 {%k1}
 ; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm15
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,xmm15[3,10],zero,zero,zero,xmm15[6,13,u,u,u,u,u,u,u]
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[5,12],zero,zero,xmm14[1,8,15],zero,zero,xmm14[u,u,u,u,u,u,u]
 ; AVX512BW-SLOW-NEXT:    vpor %xmm15, %xmm14, %xmm14
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 {%k4} = ymm1[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm14[0],ymm13[1,2,3,4,5,6,7],ymm14[8],ymm13[9,10,11,12,13,14,15]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm14[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 {%k4} = ymm4[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm14[0],ymm13[1,2,3,4,5,6,7],ymm14[8],ymm13[9,10,11,12,13,14,15]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm14[0,1,2,3],ymm4[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm7, %ymm6 {%k2}
+; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm7
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u],zero,zero,xmm7[2,9],zero,zero,zero,xmm7[5,12,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,4,11],zero,zero,xmm6[0,7,14],zero,zero,xmm6[u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpor %xmm7, %xmm6, %xmm6
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm8[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm9[1,8,15]
+; AVX512BW-SLOW-NEXT:    vpor %xmm7, %xmm8, %xmm7
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm7, %ymm6 {%k5}
 ; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm3, %ymm2 {%k3}
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[5,12,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,4,11],zero,zero,xmm2[0,7,14],zero,zero,xmm2[u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm2[6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u,u,u,u]
 ; AVX512BW-SLOW-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm4[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm7[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm7[1,8,15]
-; AVX512BW-SLOW-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm3, %ymm2 {%k5}
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm8, %ymm6 {%k2}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm6[6,13],zero,zero,xmm6[2,9],zero,zero,zero,xmm6[u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm4
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[4,11],zero,zero,xmm4[0,7,14,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 {%k4} = ymm0[u,u,u,u,u,u,u,u,u,5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0],ymm2[1,2,3,4,5,6,7],ymm3[8],ymm2[9,10,11,12,13,14,15]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm5, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm11, (%rdx)
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 {%k4} = ymm0[u,u,u,u,u,u,u,u,u,5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm6[1,2,3,4,5,6,7],ymm2[8],ymm6[9,10,11,12,13,14,15]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa %ymm1, (%rsi)
+; AVX512BW-SLOW-NEXT:    vmovdqa %ymm10, (%rdx)
 ; AVX512BW-SLOW-NEXT:    vmovdqa %ymm12, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm10, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm9, (%r9)
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm1, (%r10)
+; AVX512BW-SLOW-NEXT:    vmovdqa %ymm11, (%r8)
+; AVX512BW-SLOW-NEXT:    vmovdqa %ymm5, (%r9)
+; AVX512BW-SLOW-NEXT:    vmovdqa %ymm4, (%r10)
 ; AVX512BW-SLOW-NEXT:    vmovdqa %ymm0, (%rax)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
 ; AVX512BW-SLOW-NEXT:    retq
@@ -4292,85 +4302,76 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [16,9,2,19,20,13,6,23,24,17,26,27,28,21,30,31]
-; AVX512BW-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm2
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [16,9,2,19,12,5,22,23,24,17,26,27,20,29,30,31]
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm1, %zmm1
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [8,1,2,19,12,5,22,15,0,9,26,11,4,29,14,7]
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm7
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [8,1,18,11,4,5,22,15,0,25,10,3,12,29,14,7]
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm9
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,18,11,4,21,14,7,8,25,10,3,28,13,6,15]
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm10
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,17,10,3,4,21,14,7,24,9,2,11,28,13,6,31]
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm8
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,17,10,3,20,13,6,23,24,25,18,27,28,21,30,31]
-; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm13
-; AVX512BW-FAST-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512BW-FAST-NEXT:    vmovdqa 160(%rdi), %ymm2
-; AVX512BW-FAST-NEXT:    movw $8772, %r11w # imm = 0x2244
-; AVX512BW-FAST-NEXT:    kmovd %r11d, %k1
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm3, %ymm2, %ymm4 {%k1}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[6,13,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,5,12],zero,zero,xmm4[1,8,15],zero,zero,xmm4[u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm5, %xmm4, %xmm14
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,1,2,4,6>
-; AVX512BW-FAST-NEXT:    vmovdqa 192(%rdi), %ymm11
-; AVX512BW-FAST-NEXT:    vpermd %ymm11, %ymm4, %ymm4
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,23,26,29]
-; AVX512BW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [0,1,2,15,0,1,2,15]
-; AVX512BW-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512BW-FAST-NEXT:    vpermt2d %ymm4, %ymm12, %ymm14
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %ymm6
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %ymm5
+; AVX512BW-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm1
+; AVX512BW-FAST-NEXT:    vpermw %zmm1, %zmm0, %zmm0
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [16,9,2,19,12,5,22,23,24,17,26,27,20,29,30,31]
+; AVX512BW-FAST-NEXT:    vpermw %zmm1, %zmm2, %zmm4
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [8,1,2,19,12,5,22,15,0,9,26,11,4,29,14,7]
+; AVX512BW-FAST-NEXT:    vpermw %zmm1, %zmm2, %zmm5
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [8,1,18,11,4,5,22,15,0,25,10,3,12,29,14,7]
+; AVX512BW-FAST-NEXT:    vpermw %zmm1, %zmm2, %zmm9
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,18,11,4,21,14,7,8,25,10,3,28,13,6,15]
+; AVX512BW-FAST-NEXT:    vpermw %zmm1, %zmm2, %zmm10
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,17,10,3,4,21,14,7,24,9,2,11,28,13,6,31]
+; AVX512BW-FAST-NEXT:    vpermw %zmm1, %zmm2, %zmm8
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [16,17,10,3,20,13,6,23,24,25,18,27,28,21,30,31]
+; AVX512BW-FAST-NEXT:    vpermw %zmm1, %zmm2, %zmm6
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %ymm3
+; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %ymm2
 ; AVX512BW-FAST-NEXT:    movw $-28382, %r11w # imm = 0x9122
-; AVX512BW-FAST-NEXT:    kmovd %r11d, %k4
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm5, %ymm6, %ymm4 {%k4}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm15
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm15[5,12],zero,zero,xmm15[1,8,15,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[0,7,14],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm4, %xmm15, %xmm4
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,u,6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-FAST-NEXT:    kmovd %r11d, %k5
+; AVX512BW-FAST-NEXT:    vpblendmw %ymm2, %ymm3, %ymm1 {%k5}
+; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm7
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm7[5,12],zero,zero,xmm7[1,8,15,u,u,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,7,14],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vpor %xmm7, %xmm1, %xmm1
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-FAST-NEXT:    movw $992, %r11w # imm = 0x3E0
-; AVX512BW-FAST-NEXT:    kmovd %r11d, %k2
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm13, %ymm4 {%k2}
+; AVX512BW-FAST-NEXT:    kmovd %r11d, %k1
+; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm6, %ymm1 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa 128(%rdi), %ymm7
+; AVX512BW-FAST-NEXT:    vmovdqa 160(%rdi), %ymm6
+; AVX512BW-FAST-NEXT:    movw $8772, %r11w # imm = 0x2244
+; AVX512BW-FAST-NEXT:    kmovd %r11d, %k1
+; AVX512BW-FAST-NEXT:    vpblendmw %ymm7, %ymm6, %ymm11 {%k1}
+; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm12
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u],zero,zero,xmm12[3,10],zero,zero,zero,xmm12[6,13,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,5,12],zero,zero,xmm11[1,8,15],zero,zero,xmm11[u,u,u,u]
+; AVX512BW-FAST-NEXT:    vpor %xmm12, %xmm11, %xmm11
+; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,1,2,4,6>
+; AVX512BW-FAST-NEXT:    vmovdqa 192(%rdi), %ymm13
+; AVX512BW-FAST-NEXT:    vpermd %ymm13, %ymm12, %ymm12
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,23,26,29]
+; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,6],ymm12[7]
 ; AVX512BW-FAST-NEXT:    movl $-524288, %r11d # imm = 0xFFF80000
-; AVX512BW-FAST-NEXT:    kmovd %r11d, %k5
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm14, %ymm4 {%k5}
-; AVX512BW-FAST-NEXT:    movw $9288, %r11w # imm = 0x2448
-; AVX512BW-FAST-NEXT:    kmovd %r11d, %k2
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm3, %ymm2, %ymm13 {%k2}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm13[u,u,u,6,13],zero,zero,xmm13[2,9],zero,zero,zero,xmm13[u,u,u,u]
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm13
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,u],zero,zero,xmm13[4,11],zero,zero,xmm13[0,7,14,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm14, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,1,3,4,6>
-; AVX512BW-FAST-NEXT:    vpermd %ymm11, %ymm14, %ymm14
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,17,20,27,30]
-; AVX512BW-FAST-NEXT:    vpermt2d %ymm14, %ymm12, %ymm13
+; AVX512BW-FAST-NEXT:    kmovd %r11d, %k4
+; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm11, %ymm1 {%k4}
 ; AVX512BW-FAST-NEXT:    movw $4644, %r11w # imm = 0x1224
-; AVX512BW-FAST-NEXT:    kmovd %r11d, %k3
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm5, %ymm6, %ymm14 {%k3}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm15
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm15[6,13],zero,zero,xmm15[2,9,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[1,8,15],zero,zero,xmm14[4,11],zero,zero,xmm14[u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm15, %xmm14, %xmm14
+; AVX512BW-FAST-NEXT:    kmovd %r11d, %k2
+; AVX512BW-FAST-NEXT:    vpblendmw %ymm2, %ymm3, %ymm11 {%k2}
+; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm12
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm12[6,13],zero,zero,xmm12[2,9,u,u,u,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[1,8,15],zero,zero,xmm11[4,11],zero,zero,xmm11[u,u,u,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vpor %xmm12, %xmm11, %xmm11
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,u,u,u,u,u,0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-FAST-NEXT:    movl $511, %r11d # imm = 0x1FF
-; AVX512BW-FAST-NEXT:    kmovd %r11d, %k6
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm14, %ymm8 {%k6}
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm13, %ymm8 {%k5}
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm2, %ymm3, %ymm13 {%k4}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm14
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u],zero,zero,zero,xmm14[5,12],zero,zero,xmm14[1,8,15,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,0,7,14],zero,zero,xmm13[3,10],zero,zero,zero,xmm13[u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm14, %xmm13, %xmm13
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,1,3,5,6>
-; AVX512BW-FAST-NEXT:    vpermd %ymm11, %ymm14, %ymm11
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,21,24,31]
-; AVX512BW-FAST-NEXT:    vpermt2d %ymm11, %ymm12, %ymm13
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm5, %ymm6, %ymm11 {%k1}
+; AVX512BW-FAST-NEXT:    kmovd %r11d, %k3
+; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm11, %ymm8 {%k3}
+; AVX512BW-FAST-NEXT:    movw $9288, %r11w # imm = 0x2448
+; AVX512BW-FAST-NEXT:    kmovd %r11d, %k3
+; AVX512BW-FAST-NEXT:    vpblendmw %ymm7, %ymm6, %ymm11 {%k3}
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm11[u,u,u,6,13],zero,zero,xmm11[2,9],zero,zero,zero,xmm11[u,u,u,u]
+; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm11
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u],zero,zero,xmm11[4,11],zero,zero,xmm11[0,7,14,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vpor %xmm12, %xmm11, %xmm11
+; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,1,3,4,6>
+; AVX512BW-FAST-NEXT:    vpermd %ymm13, %ymm12, %ymm12
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,17,20,27,30]
+; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5,6],ymm12[7]
+; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm11, %ymm8 {%k4}
+; AVX512BW-FAST-NEXT:    vpblendmw %ymm2, %ymm3, %ymm11 {%k1}
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm11[2,9],zero,zero,zero,xmm11[5,12],zero,zero,xmm11[u,u,u,u,u,u,u]
 ; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm11
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[0,7,14],zero,zero,xmm11[3,10,u,u,u,u,u,u,u]
@@ -4378,9 +4379,19 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-FAST-NEXT:    movl $261632, %r11d # imm = 0x3FE00
 ; AVX512BW-FAST-NEXT:    kmovd %r11d, %k4
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm11 {%k4} = ymm10[u,u,u,u,u,u,u,u,u,1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm11[0],ymm13[1,2,3,4,5,6,7],ymm11[8],ymm13[9,10,11,12,13,14,15]
+; AVX512BW-FAST-NEXT:    vpblendmw %ymm6, %ymm7, %ymm10 {%k5}
+; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm12
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u],zero,zero,zero,xmm12[5,12],zero,zero,xmm12[1,8,15,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[u,u,0,7,14],zero,zero,xmm10[3,10],zero,zero,zero,xmm10[u,u,u,u]
+; AVX512BW-FAST-NEXT:    vpor %xmm12, %xmm10, %xmm10
+; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,1,3,5,6>
+; AVX512BW-FAST-NEXT:    vpermd %ymm13, %ymm12, %ymm12
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,21,24,31]
+; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5,6],ymm12[7]
+; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm11[0],ymm10[1,2,3,4,5,6,7],ymm11[8],ymm10[9,10,11,12,13,14,15]
 ; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm2, %ymm3, %ymm11 {%k3}
+; AVX512BW-FAST-NEXT:    vpblendmw %ymm6, %ymm7, %ymm11 {%k2}
 ; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm12
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[2,9,u,u,u,u,u]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,1,8,15],zero,zero,xmm11[4,11],zero,zero,xmm11[u,u,u,u,u]
@@ -4395,7 +4406,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-FAST-NEXT:    movl $-134217728, %edi # imm = 0xF8000000
 ; AVX512BW-FAST-NEXT:    kmovd %edi, %k5
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm14, %ymm13 {%k5}
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm5, %ymm6, %ymm14 {%k2}
+; AVX512BW-FAST-NEXT:    vpblendmw %ymm2, %ymm3, %ymm14 {%k3}
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[3,10],zero,zero,zero,xmm14[6,13],zero,zero,xmm14[u,u,u,u,u,u,u]
 ; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm14
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = zero,zero,xmm14[1,8,15],zero,zero,xmm14[4,11,u,u,u,u,u,u,u]
@@ -4403,7 +4414,7 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm14 {%k4} = ymm9[u,u,u,u,u,u,u,u,u,2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm14[0],ymm13[1,2,3,4,5,6,7],ymm14[8],ymm13[9,10,11,12,13,14,15]
 ; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm9 = ymm14[0,1,2,3],ymm9[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm2, %ymm3, %ymm13 {%k1}
+; AVX512BW-FAST-NEXT:    vpblendmw %ymm6, %ymm7, %ymm13 {%k1}
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm13[u,u,2,9],zero,zero,zero,xmm13[5,12],zero,zero,xmm13[u,u,u,u,u]
 ; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm13
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u],zero,zero,xmm13[0,7,14],zero,zero,xmm13[3,10,u,u,u,u,u]
@@ -4414,15 +4425,15 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-FAST-NEXT:    vpor %xmm14, %xmm15, %xmm14
 ; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm14, %ymm13 {%k5}
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm6, %ymm5, %ymm14 {%k3}
+; AVX512BW-FAST-NEXT:    vpblendmw %ymm3, %ymm2, %ymm14 {%k2}
 ; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm15
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,xmm15[2,9],zero,zero,zero,xmm15[5,12,u,u,u,u,u,u,u]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[4,11],zero,zero,xmm14[0,7,14],zero,zero,xmm14[u,u,u,u,u,u,u]
 ; AVX512BW-FAST-NEXT:    vpor %xmm15, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm14 {%k4} = ymm7[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm14[0],ymm13[1,2,3,4,5,6,7],ymm14[8],ymm13[9,10,11,12,13,14,15]
-; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm7 = ymm14[0,1,2,3],ymm7[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm2, %ymm3, %ymm13 {%k2}
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm14 {%k4} = ymm5[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm14[0],ymm13[1,2,3,4,5,6,7],ymm14[8],ymm13[9,10,11,12,13,14,15]
+; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm5 = ymm14[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vpblendmw %ymm6, %ymm7, %ymm13 {%k3}
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm13[u,u,3,10],zero,zero,zero,xmm13[6,13],zero,zero,xmm13[u,u,u,u,u]
 ; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm13, %xmm13
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u],zero,zero,xmm13[1,8,15],zero,zero,xmm13[4,11,u,u,u,u,u]
@@ -4433,39 +4444,39 @@ define void @load_i8_stride7_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-FAST-NEXT:    vpor %xmm14, %xmm15, %xmm14
 ; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm14, %ymm13 {%k5}
-; AVX512BW-FAST-NEXT:    vpblendmw %ymm6, %ymm5, %ymm14 {%k1}
+; AVX512BW-FAST-NEXT:    vpblendmw %ymm3, %ymm2, %ymm14 {%k1}
 ; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm15
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,xmm15[3,10],zero,zero,zero,xmm15[6,13,u,u,u,u,u,u,u]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[5,12],zero,zero,xmm14[1,8,15],zero,zero,xmm14[u,u,u,u,u,u,u]
 ; AVX512BW-FAST-NEXT:    vpor %xmm15, %xmm14, %xmm14
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm14 {%k4} = ymm1[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm14[0],ymm13[1,2,3,4,5,6,7],ymm14[8],ymm13[9,10,11,12,13,14,15]
-; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm14[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm14 {%k4} = ymm4[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm14[0],ymm13[1,2,3,4,5,6,7],ymm14[8],ymm13[9,10,11,12,13,14,15]
+; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm14[0,1,2,3],ymm4[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm7, %ymm6 {%k2}
+; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm7
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u],zero,zero,xmm7[2,9],zero,zero,zero,xmm7[5,12,u,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,4,11],zero,zero,xmm6[0,7,14],zero,zero,xmm6[u,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vpor %xmm7, %xmm6, %xmm6
+; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm12[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm11[1,8,15]
+; AVX512BW-FAST-NEXT:    vpor %xmm7, %xmm11, %xmm7
+; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm7, %ymm6 {%k5}
 ; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm3, %ymm2 {%k3}
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[5,12,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,4,11],zero,zero,xmm2[0,7,14],zero,zero,xmm2[u,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm2[6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u,u,u,u]
 ; AVX512BW-FAST-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm12[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm11[1,8,15]
-; AVX512BW-FAST-NEXT:    vpor %xmm3, %xmm11, %xmm3
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm3, %ymm2 {%k5}
-; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm6, %ymm5 {%k2}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm5[6,13],zero,zero,xmm5[2,9],zero,zero,zero,xmm5[u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm5
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,xmm5[4,11],zero,zero,xmm5[0,7,14,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpor %xmm3, %xmm5, %xmm3
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm3 {%k4} = ymm0[u,u,u,u,u,u,u,u,u,5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0],ymm2[1,2,3,4,5,6,7],ymm3[8],ymm2[9,10,11,12,13,14,15]
-; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm4, (%rsi)
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm2 {%k4} = ymm0[u,u,u,u,u,u,u,u,u,5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm6[1,2,3,4,5,6,7],ymm2[8],ymm6[9,10,11,12,13,14,15]
+; AVX512BW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vmovdqa %ymm1, (%rsi)
 ; AVX512BW-FAST-NEXT:    vmovdqa %ymm8, (%rdx)
 ; AVX512BW-FAST-NEXT:    vmovdqa %ymm10, (%rcx)
 ; AVX512BW-FAST-NEXT:    vmovdqa %ymm9, (%r8)
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm7, (%r9)
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm1, (%r10)
+; AVX512BW-FAST-NEXT:    vmovdqa %ymm5, (%r9)
+; AVX512BW-FAST-NEXT:    vmovdqa %ymm4, (%r10)
 ; AVX512BW-FAST-NEXT:    vmovdqa %ymm0, (%rax)
 ; AVX512BW-FAST-NEXT:    vzeroupper
 ; AVX512BW-FAST-NEXT:    retq
@@ -8917,70 +8928,71 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ;
 ; AVX512F-ONLY-SLOW-LABEL: load_i8_stride7_vf64:
 ; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
+; AVX512F-ONLY-SLOW-NEXT:    pushq %rax
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm24
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 32(%rdi), %ymm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm22
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm13, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm24, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 32(%rdi), %ymm29
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm29, %ymm24, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[5,12],zero,zero,xmm1[1,8,15,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm0, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm22, %ymm10, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm11
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm11, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7,8,9],ymm3[10],ymm1[11,12],ymm3[13],ymm1[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm3, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7,8,9],ymm3[10],ymm2[11,12],ymm3[13],ymm2[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm3, %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,4,11,2,9,16,23,30,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 128(%rdi), %ymm31
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 160(%rdi), %ymm18
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm18, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 160(%rdi), %ymm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm16, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[6,13,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,5,12],zero,zero,xmm2[1,8,15],zero,zero,xmm2[u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm6, %xmm8, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm28
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm9[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm11[0],xmm3[1],xmm11[1],xmm3[2],xmm11[2],xmm3[3],xmm11[3]
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm25 = [0,1,2,11,0,1,2,11]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %ymm3, %ymm25, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm28
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm7[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1],xmm3[2],xmm10[2],xmm3[3],xmm10[3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm3[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm5[5,12,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm5, %xmm12
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm11
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm11[0,7,14],zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm25
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm10[0,7,14],zero,zero,xmm10[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm14, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm2, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm23, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm1, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm22, %zmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 288(%rdi), %ymm19
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm0, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm19, %ymm14, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[u,u,u,u,u,3,10],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[u,u,u,u,u,3,10],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u],zero,zero,xmm1[1,8,15],zero,zero,xmm1[4,11,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 352(%rdi), %ymm17
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm3[2,3,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm5[2],ymm3[3,4,5],ymm5[6],ymm3[7,8,9],ymm5[10],ymm3[11,12,13],ymm5[14],ymm3[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm2[2,3,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm5[2],ymm2[3,4,5],ymm5[6],ymm2[7,8,9],ymm5[10],ymm2[11,12,13],ymm5[14],ymm2[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm26, %ymm1, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm26, %ymm1, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 416(%rdi), %ymm20
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 384(%rdi), %ymm21
@@ -8991,93 +9003,100 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm5[4,11],zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm5, %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm3, %ymm27, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm29, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm9, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm30, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm1, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm24, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm29, %ymm24, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[6,13],zero,zero,xmm3[2,9,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm13, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm10, %ymm22, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm7[2],ymm3[3,4,5],ymm7[6],ymm3[7,8,9],ymm7[10],ymm3[11,12,13],ymm7[14],ymm3[15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm11, %ymm18, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm13[2],ymm3[3,4,5],ymm13[6],ymm3[7,8,9],ymm13[10],ymm3[11,12,13],ymm13[14],ymm3[15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm13, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm18, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm16, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm2[u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm9[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm7[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm8[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %ymm5, %ymm25, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm12[6,13,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm11[1,8,15],zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm5[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm13
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm13[6,13,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm10[1,8,15],zero,zero,xmm10[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm6, %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm5, %zmm2, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm23, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm22, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm1, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u],zero,zero,xmm5[2,9],zero,zero,zero,xmm5[5,12,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,4,11],zero,zero,xmm3[0,7,14],zero,zero,xmm3[u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm3, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm0, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm5[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7,8,9,10],ymm6[11],ymm5[12,13],ymm6[14],ymm5[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm26, %ymm3, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm3[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm3[5,12],zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,xmm3[0,7,14],zero,zero,xmm3[3,10]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm3, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm27, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm29, %zmm23
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm24, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm9, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm22
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm30, %zmm22
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm29, %ymm24, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm2[2,9],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm2, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm1, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm10, %ymm22, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm7[3],ymm3[4,5],ymm7[6],ymm3[7,8,9,10],ymm7[11],ymm3[12,13],ymm7[14],ymm3[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm11, %ymm18, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7,8,9,10],ymm0[11],ymm3[12,13],ymm0[14],ymm3[15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm5, %ymm2, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm30
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm13, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm31, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm27
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm31, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm6
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,zero,xmm6[5,12],zero,zero,xmm6[1,8,15,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,0,7,14],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm2, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm5, %xmm9, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm5, %xmm7, %xmm6
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm8[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermt2d %ymm5, %ymm25, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm11[2,9],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,xmm12[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm5[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm10[2,9],zero,zero,zero,xmm10[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,xmm13[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm6, %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm5, %zmm2, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm28, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[6,13,u,u]
@@ -9087,58 +9106,59 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm15, %ymm17, %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm5[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1,2],ymm6[3],ymm5[4,5,6],ymm6[7,8],ymm5[9,10],ymm6[11],ymm5[12,13,14],ymm6[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm26, %ymm3, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm0, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm3[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm3[6,13],zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,xmm3[1,8,15],zero,zero,xmm3[4,11]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm3, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm27, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm9, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm29, %zmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm24, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm30, %zmm25
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm29, %ymm24, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm2[3,10],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm10, %ymm22, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm7[0],ymm3[1,2],ymm7[3],ymm3[4,5,6],ymm7[7,8],ymm3[9,10],ymm7[11],ymm3[12,13,14],ymm7[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm30, %ymm2, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm11, %ymm18, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm0[0],ymm3[1,2],ymm0[3],ymm3[4,5,6],ymm0[7,8],ymm3[9,10],ymm0[11],ymm3[12,13,14],ymm0[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm2, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm1, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm31, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm31, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u],zero,zero,zero,xmm5[6,13],zero,zero,xmm5[2,9,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm9[5,12]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm7[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[5,12]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm8[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm8, %xmm7
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm6, %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm11[3,10],zero,zero,zero,xmm11[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,xmm12[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm27, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm10[3,10],zero,zero,zero,xmm10[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,xmm13[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm6, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm5, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm28, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm0, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm3[u,u,u,u,u,6,13],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u],zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm3, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm15, %ymm17, %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm5[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1,2,3],ymm6[4],ymm5[5,6],ymm6[7,8],ymm5[9,10,11],ymm6[12],ymm5[13,14],ymm6[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm26, %ymm3, %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm1, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm3
@@ -9147,140 +9167,140 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,4,11],zero,zero,xmm3[0,7,14],zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm3, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm27, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm9, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm28
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm29, %zmm28
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm30, %zmm28
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm1, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm24, %ymm16, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm24, %ymm29, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[2,9],zero,zero,zero,xmm3[5,12,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,11],zero,zero,xmm2[0,7,14],zero,zero,xmm2[u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vporq %xmm3, %xmm2, %xmm29
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm13, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm2, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm19, %ymm14, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u],zero,zero,zero,xmm3[5,12],zero,zero,xmm3[1,8,15,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,0,7,14],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm0, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm15, %ymm17, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm3[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3],ymm5[4],ymm3[5,6,7,8],ymm5[9],ymm3[10,11],ymm5[12],ymm3[13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm26, %ymm2, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[6,13]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15],zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm2, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm2, %ymm0, %ymm26
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm3, %ymm27, %ymm26
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm3, %ymm9, %ymm26
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm1, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm19, %ymm14, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u],zero,zero,zero,xmm3[6,13],zero,zero,xmm3[2,9,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm13, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm3[2,3,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3,4],ymm5[5],ymm3[6,7,8],ymm5[9],ymm3[10,11,12],ymm5[13],ymm3[14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm0, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm2[u,u,u,u,u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm2, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm2, %ymm0, %ymm30
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm3, %ymm27, %ymm30
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm31, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm3, %ymm9, %ymm30
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm31, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm2[u,u,2,9],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm2, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm24, %ymm16, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm24, %ymm16, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm23
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm12, %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm24, %ymm29, %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm24, %ymm29, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm9[6,13]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm7, %xmm12
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm7, %xmm13
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm7, %xmm8, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm10, %ymm22, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %ymm19, %ymm4, %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm11, %ymm18, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %ymm19, %ymm12, %ymm14
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm1, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm22, %ymm10, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm22, %ymm10, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,xmm10[3,10],zero,zero,zero,xmm10[6,13,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[5,12],zero,zero,xmm6[1,8,15],zero,zero,xmm6[u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm10, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm10[0],ymm2[1,2,3],ymm10[4],ymm2[5,6],ymm10[7,8],ymm2[9,10,11],ymm10[12],ymm2[13,14],ymm10[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm16, %ymm29, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm11, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm11, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm11[0],ymm2[1,2,3],ymm11[4],ymm2[5,6],ymm11[7,8],ymm2[9,10,11],ymm11[12],ymm2[13,14],ymm11[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[3,10,1,8,15,6,13,20,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm0, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm19, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm8[0],ymm10[1],ymm8[2,3],ymm10[4],ymm8[5,6,7,8],ymm10[9],ymm8[10,11],ymm10[12],ymm8[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm16, %ymm6, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[6,13],zero,zero,xmm0[2,9],zero,zero,zero,xmm0[u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm27, %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,xmm5[3,10],zero,zero,zero,xmm5[6,13,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[5,12],zero,zero,xmm6[1,8,15],zero,zero,xmm6[u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm6, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm8[0],ymm11[1],ymm8[2,3],ymm11[4],ymm8[5,6,7,8],ymm11[9],ymm8[10,11],ymm11[12],ymm8[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm5, %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm4[6,13],zero,zero,xmm4[2,9],zero,zero,zero,xmm4[u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm0, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm4[0],ymm10[1],ymm4[2,3,4],ymm10[5],ymm4[6,7,8],ymm10[9],ymm4[10,11,12],ymm10[13],ymm4[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm12[0],ymm11[1],ymm12[2,3,4],ymm11[5],ymm12[6,7,8],ymm11[9],ymm12[10,11,12],ymm11[13],ymm12[14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm16, %ymm0, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm11, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[5,12,3,10,1,8,15,22,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm0, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm10, %xmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm4[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm4[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm7, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm7, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm31, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm31, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm3[u,u,3,10],zero,zero,zero,xmm3[6,13],zero,zero,xmm3[u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,xmm3[1,8,15],zero,zero,xmm3[4,11,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm3, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %ymm17, %ymm1, %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm18, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm12[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm9[0,7,14]
-; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm10, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm16, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm13[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm9[0,7,14]
+; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm11, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm19, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm27, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,4,11],zero,zero,xmm1[0,7,14],zero,zero,xmm1[u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm12[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm13[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm9[1,8,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm9, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm19, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm27, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm4[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm11[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm10[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm7, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm6, %zmm7, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm4, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm11[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm10[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm6, %zmm7, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm7, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm26, %zmm0, %zmm3
 ; AVX512F-ONLY-SLOW-NEXT:    movw $-512, %ax # imm = 0xFE00
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
@@ -9295,9 +9315,10 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm15[0,1],ymm5[2],ymm15[3,4],ymm5[5],ymm15[6,7,8,9],ymm5[10],ymm15[11,12],ymm5[13],ymm15[14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[1,8,15,22,29,20,27,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm23, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u],zero,zero,zero,xmm3[5,12],zero,zero,xmm3[1,8,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm13[u,u,u,u,u,u,0,7,14],zero,zero,xmm13[3,10],zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,0,7,14],zero,zero,xmm4[3,10],zero,zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm4, %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0,1,2],ymm3[3,4,5,6,7],ymm5[8,9,10],ymm3[11,12,13,14,15]
@@ -9306,7 +9327,7 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm3, %zmm2 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512F-ONLY-SLOW-NEXT:    vmovaps %zmm3, (%rsi)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm23, (%rdx)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, (%rdx)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, (%rcx)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm28, (%r8)
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, (%r9)
@@ -9314,232 +9335,236 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512F-ONLY-SLOW-NEXT:    popq %rax
 ; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
 ; AVX512F-ONLY-SLOW-NEXT:    retq
 ;
 ; AVX512F-ONLY-FAST-LABEL: load_i8_stride7_vf64:
 ; AVX512F-ONLY-FAST:       # %bb.0:
+; AVX512F-ONLY-FAST-NEXT:    pushq %rax
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %ymm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 32(%rdi), %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rdi), %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 32(%rdi), %ymm16
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %ymm25
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm15, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm22, %ymm16, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm16, %ymm20, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm1[5,12],zero,zero,xmm1[1,8,15,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm1, %xmm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm25, %ymm10, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 80(%rdi), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7,8,9],ymm0[10],ymm2[11,12],ymm0[13],ymm2[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm25, %ymm11, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 80(%rdi), %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7,8,9],ymm3[10],ymm2[11,12],ymm3[13],ymm2[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,4,11,2,9,16,23,30,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 128(%rdi), %ymm26
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 160(%rdi), %ymm31
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm26, %ymm31, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[6,13,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[6,13,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,1,2,4,6>
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,1,2,4,6>
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdi), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm3, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,23,26,29]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,1,2,15,0,1,2,15]
-; AVX512F-ONLY-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm8, %ymm3, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 240(%rdi), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm0[5,12,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 224(%rdi), %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm8[0,7,14],zero,zero,xmm8[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm11, %xmm13, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm11, %zmm4, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm20, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm5, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,23,26,29]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 240(%rdi), %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm6[5,12,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm6, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 224(%rdi), %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm6[0,7,14],zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm6, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm5, %xmm10, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm2, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm8, %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 288(%rdi), %ymm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm11, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm2[u,u,u,u,u,3,10],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[u,u]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm2, %xmm13, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 352(%rdi), %ymm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm10, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm4[u,u,u,u,u,3,10],zero,zero,zero,xmm4[6,13],zero,zero,xmm4[u,u]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u],zero,zero,xmm4[1,8,15],zero,zero,xmm4[4,11,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm4, %xmm13, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 352(%rdi), %ymm22
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 320(%rdi), %ymm13
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm29, %ymm13, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm14[2,3,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm14[0,1],ymm6[2],ymm14[3,4,5],ymm6[6],ymm14[7,8,9],ymm6[10],ymm14[11,12,13],ymm6[14],ymm14[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm22, %ymm13, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm14[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm14[0,1],ymm2[2],ymm14[3,4,5],ymm2[6],ymm14[7,8,9],ymm2[10],ymm14[11,12,13],ymm2[14],ymm14[15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm24, %ymm2, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm24, %ymm4, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 416(%rdi), %ymm18
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 384(%rdi), %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm18, %ymm19, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u],zero,zero,zero,xmm6[6,13],zero,zero,xmm6[2,9]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm4[4,11],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm2, %ymm27, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm29, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm18, %ymm19, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u],zero,zero,zero,xmm7[6,13],zero,zero,xmm7[2,9]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm2[4,11],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm7, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm21, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm30, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm22, %ymm16, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm16, %ymm20, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[6,13],zero,zero,xmm4[2,9,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm4, %xmm2, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm15, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm25, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm0[2],ymm4[3,4,5],ymm0[6],ymm4[7,8,9],ymm0[10],ymm4[11,12,13],ymm0[14],ymm4[15]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm25, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm3[2],ymm4[3,4,5],ymm3[6],ymm4[7,8,9],ymm3[10],ymm4[11,12,13],ymm3[14],ymm4[15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm26, %ymm31, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm2[u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm2[u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm7, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,1,3,4,6>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm7, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,17,20,27,30]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm7, %ymm3, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,zero,xmm9[6,13,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm8[1,8,15],zero,zero,xmm8[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm7, %xmm6, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm2, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm20, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm5, %xmm2, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,1,3,4,6>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm5, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,17,20,27,30]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm7[6,13,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm9[1,8,15],zero,zero,xmm9[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm5, %xmm6, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm2, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm8, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm17, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u],zero,zero,xmm6[2,9],zero,zero,zero,xmm6[5,12,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm17, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u],zero,zero,xmm5[2,9],zero,zero,zero,xmm5[5,12,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,4,11],zero,zero,xmm4[0,7,14],zero,zero,xmm4[u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm29, %ymm13, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1,2],ymm7[3],ymm6[4,5],ymm7[6],ymm6[7,8,9,10],ymm7[11],ymm6[12,13],ymm7[14],ymm6[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm24, %ymm4, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm22, %ymm13, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm5[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7,8,9,10],ymm6[11],ymm5[12,13],ymm6[14],ymm5[15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm24, %ymm4, %ymm5
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm18, %ymm19, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm4[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm4[5,12],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm4[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm4[5,12],zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u],zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm7, %xmm4, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm6, %xmm4, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm21, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm30, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm27, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm29, %zmm21
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm22, %ymm16, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm16, %ymm20, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[2,9],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm4, %xmm2, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm25, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1,2],ymm0[3],ymm4[4,5],ymm0[6],ymm4[7,8,9,10],ymm0[11],ymm4[12,13],ymm0[14],ymm4[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm6, %ymm2, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm6, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm25, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1,2],ymm3[3],ymm4[4,5],ymm3[6],ymm4[7,8,9,10],ymm3[11],ymm4[12,13],ymm3[14],ymm4[15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm5, %ymm2, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm15, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm31, %ymm26, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,zero,xmm6[5,12],zero,zero,xmm6[1,8,15,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u],zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,0,7,14],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm6, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,1,3,5,6>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm6, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm5, %xmm2, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,1,3,5,6>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm5, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,21,24,31]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm1, %ymm3, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm8[2,9],zero,zero,zero,xmm8[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm9[2,9],zero,zero,zero,xmm9[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm9, %xmm8
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm9[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm1, %xmm3, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm7, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,xmm7[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm2, %xmm5, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm1, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm28, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm17, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[6,13,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm17, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15],zero,zero,xmm2[u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm13, %ymm29, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1,2],ymm4[3],ymm3[4,5,6],ymm4[7,8],ymm3[9,10],ymm4[11],ymm3[12,13,14],ymm4[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm24, %ymm2, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm4, %xmm2, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm13, %ymm22, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5,6],ymm5[7,8],ymm4[9,10],ymm5[11],ymm4[12,13,14],ymm5[15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm24, %ymm2, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm18, %ymm19, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm2[6,13],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm2[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm2[6,13],zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm4, %xmm2, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm5, %xmm2, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm3, %ymm21, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm4, %ymm27, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm30, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm22, %ymm16, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm29, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm16, %ymm20, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[3,10],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,8,15],zero,zero,xmm1[4,11,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm2, %xmm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm25, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0],ymm2[1,2],ymm0[3],ymm2[4,5,6],ymm0[7,8],ymm2[9,10],ymm0[11],ymm2[12,13,14],ymm0[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm7, %ymm1, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm25, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1,2],ymm3[3],ymm2[4,5,6],ymm3[7,8],ymm2[9,10],ymm3[11],ymm2[12,13,14],ymm3[15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm6, %ymm1, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm31, %ymm26, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,zero,xmm3[6,13],zero,zero,xmm3[2,9,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,zero,xmm4[6,13],zero,zero,xmm4[2,9,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,1,8,15],zero,zero,xmm1[4,11],zero,zero,xmm1[u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 208(%rdi), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm1[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm1[5,12]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdi), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm3[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm4, %xmm1, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 208(%rdi), %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[5,12]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 192(%rdi), %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm6, %xmm7, %xmm6
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm4, %ymm27, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm8[3,10],zero,zero,zero,xmm8[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,xmm9[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm4, %xmm7, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm6, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm28, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm17, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm17, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm2[u,u,u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm6, %xmm2, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm13, %ymm29, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm13, %ymm22, %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0],ymm6[1,2,3],ymm7[4],ymm6[5,6],ymm7[7,8],ymm6[9,10,11],ymm7[12],ymm6[13,14],ymm7[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm24, %ymm2, %ymm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm19, %ymm18, %ymm2
@@ -9548,23 +9573,23 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,4,11],zero,zero,xmm2[0,7,14],zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm7, %xmm2, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm21, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm27, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm30, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm29, %zmm28
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm16, %ymm22, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm20, %ymm16, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[2,9],zero,zero,zero,xmm4[5,12,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,11],zero,zero,xmm2[0,7,14],zero,zero,xmm2[u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm4, %xmm2, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vporq %xmm4, %xmm2, %xmm29
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm15, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm11, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm10, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u],zero,zero,zero,xmm4[5,12],zero,zero,xmm4[1,8,15,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,0,7,14],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm4, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm13, %ymm29, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm13, %ymm22, %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm4[2,3,0,1]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2,3],ymm6[4],ymm4[5,6,7,8],ymm6[9],ymm4[10,11],ymm6[12],ymm4[13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
@@ -9576,137 +9601,138 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15],zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm6, %xmm2, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm2, %ymm0, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm4, %ymm21, %ymm24
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm4, %ymm27, %ymm24
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm11, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm10, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u],zero,zero,zero,xmm4[6,13],zero,zero,xmm4[2,9,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm4, %xmm2, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm15, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm29, %ymm13, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm22, %ymm13, %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm4[2,3,0,1]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2,3,4],ymm6[5],ymm4[6,7,8],ymm6[9],ymm4[10,11,12],ymm6[13],ymm4[14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm19, %ymm18, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm2[u,u,u,u,u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14]
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm6, %xmm2, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm2, %ymm0, %ymm30
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm4, %ymm21, %ymm30
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm4, %ymm27, %ymm30
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm31, %ymm26, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[u,u,2,9],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vporq %xmm4, %xmm2, %xmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm4, %xmm2, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm18, %ymm19, %ymm15
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm16, %ymm22, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm5, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm16, %ymm22, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm1[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm1[6,13]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm3[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm6, %xmm8, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm25, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %ymm17, %ymm12, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm25, %ymm10, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm25, %ymm10, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,xmm10[3,10],zero,zero,zero,xmm10[6,13,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm20, %ymm16, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm20, %ymm16, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[6,13]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm1[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm8, %xmm9, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm25, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %ymm17, %ymm12, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm25, %ymm11, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm25, %ymm11, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm7, %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[3,10],zero,zero,zero,xmm11[6,13,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[5,12],zero,zero,xmm7[1,8,15],zero,zero,xmm7[u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm7, %xmm10, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0],ymm2[1,2,3],ymm0[4],ymm2[5,6],ymm0[7,8],ymm2[9,10,11],ymm0[12],ymm2[13,14],ymm0[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm16, %ymm9, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm21, %ymm0, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm7, %xmm11, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm11[0],ymm2[1,2,3],ymm11[4],ymm2[5,6],ymm11[7,8],ymm2[9,10,11],ymm11[12],ymm2[13,14],ymm11[15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[3,10,1,8,15,6,13,20,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm3, %ymm29, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm10, %ymm27, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm0[1],ymm8[2,3],ymm0[4],ymm8[5,6,7,8],ymm0[9],ymm8[10,11],ymm0[12],ymm8[13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm0, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm16, %ymm7, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm5[6,13],zero,zero,xmm5[2,9],zero,zero,zero,xmm5[u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm16, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm9[0],ymm11[1],ymm9[2,3],ymm11[4],ymm9[5,6,7,8],ymm11[9],ymm9[10,11],ymm11[12],ymm9[13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm3, %ymm7, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm0[6,13],zero,zero,xmm0[2,9],zero,zero,zero,xmm0[u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14,u,u,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm7, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm12[0],ymm10[1],ymm12[2,3,4],ymm10[5],ymm12[6,7,8],ymm10[9],ymm12[10,11,12],ymm10[13],ymm12[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm16, %ymm0, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm9, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm5[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm6, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm6, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm12[0],ymm11[1],ymm12[2,3,4],ymm11[5],ymm12[6,7,8],ymm11[9],ymm12[10,11,12],ymm11[13],ymm12[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm7[5,12,3,10,1,8,15,22,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm3, %ymm0, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm12, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm8, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm31, %ymm26, %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm4[u,u,3,10],zero,zero,zero,xmm4[6,13],zero,zero,xmm4[u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,xmm4[1,8,15],zero,zero,xmm4[4,11,u,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpor %xmm2, %xmm4, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %ymm29, %ymm14, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %ymm22, %ymm14, %ymm13
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $202, %ymm26, %ymm31, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm1[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm1[0,7,14]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm4, %xmm12, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm1[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,7,14]
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm4, %xmm11, %xmm4
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm2, %ymm27, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm2, %ymm16, %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm14[u,u,4,11],zero,zero,xmm14[0,7,14],zero,zero,xmm14[u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm2, %xmm12, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm1[1,8,15]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm14[u,u,4,11],zero,zero,xmm14[0,7,14],zero,zero,xmm14[u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm2, %xmm11, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,8,15]
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm1, %xmm5, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm2, %ymm27, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm5[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm9[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %ymm2, %ymm16, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm3[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm12[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm8, %zmm6, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm9[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm7, %zmm6, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm8, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm12[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm7, %zmm8, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    movw $-512, %ax # imm = 0xFE00
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm0 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm2 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm11[u,u,u,u,2,9],zero,zero,zero,xmm11[5,12],zero,zero,xmm11[u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm11, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u],zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm13[2,3,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm13[0,1],ymm4[2],ymm13[3,4],ymm4[5],ymm13[6,7,8,9],ymm4[10],ymm13[11,12],ymm4[13],ymm13[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[1,8,15,22,29,20,27,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u],zero,zero,zero,xmm3[5,12],zero,zero,xmm3[1,8,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm15[u,u,u,u,u,u,0,7,14],zero,zero,xmm15[3,10],zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm3, %xmm5, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm0 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm2 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm10[u,u,u,u,2,9],zero,zero,zero,xmm10[5,12],zero,zero,xmm10[u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,xmm5[0,7,14],zero,zero,xmm5[3,10,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm13[2,3,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm13[0,1],ymm5[2],ymm13[3,4],ymm5[5],ymm13[6,7,8,9],ymm5[10],ymm13[11,12],ymm5[13],ymm13[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[1,8,15,22,29,20,27,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u],zero,zero,zero,xmm4[5,12],zero,zero,xmm4[1,8,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm15[u,u,u,u,u,u,0,7,14],zero,zero,xmm15[3,10],zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm4, %xmm3, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0,1,2],ymm3[3,4,5,6,7],ymm5[8,9,10],ymm3[11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm1 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovaps %zmm3, (%rsi)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, (%rdx)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, (%rdx)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, (%rcx)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, (%r8)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, (%r9)
@@ -9714,393 +9740,407 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
+; AVX512F-ONLY-FAST-NEXT:    popq %rax
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
 ; AVX512DQ-SLOW-LABEL: load_i8_stride7_vf64:
 ; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm23
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm26
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm22
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm23, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm18
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm26, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,7,14],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm22, %ymm10, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm11
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm11, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm0
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm26
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7,8,9],ymm0[10],ymm2[11,12],ymm0[13],ymm2[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm28
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,4,11,2,9,16,23,30,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 128(%rdi), %ymm31
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 160(%rdi), %ymm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm18, %ymm2
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 160(%rdi), %ymm16
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm16, %ymm1
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm7
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u],zero,zero,xmm7[3,10],zero,zero,zero,xmm7[6,13,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,5,12],zero,zero,xmm2[1,8,15],zero,zero,xmm2[u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm7, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm4
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm4[u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm7
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm7[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm11[0],xmm13[0],xmm11[1],xmm13[1],xmm11[2],xmm13[2],xmm11[3],xmm13[3]
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm25 = [0,1,2,11,0,1,2,11]
-; AVX512DQ-SLOW-NEXT:    # ymm25 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermt2d %ymm11, %ymm25, %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm11
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = zero,zero,zero,xmm11[5,12,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm13
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm13[0,7,14],zero,zero,xmm13[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm7, %xmm1, %xmm1
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm3
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm3[u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm0, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm10[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = zero,zero,zero,xmm0[5,12,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm0, %xmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm12
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm12[0,7,14],zero,zero,xmm12[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpor %xmm14, %xmm15, %xmm14
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm14, %zmm2, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm28, %zmm2
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm14, %zmm1, %zmm23
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm22, %zmm23
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 288(%rdi), %ymm19
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm19, %ymm14, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm1[u,u,u,u,u,3,10],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[u,u,u,u,u,3,10],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[u,u]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u],zero,zero,xmm1[1,8,15],zero,zero,xmm1[4,11,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm1, %xmm15, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 352(%rdi), %ymm17
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm12
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm12
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm12[2,3,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm12[0,1],ymm5[2],ymm12[3,4,5],ymm5[6],ymm12[7,8,9],ymm5[10],ymm12[11,12,13],ymm5[14],ymm12[15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4,5],ymm4[6],ymm2[7,8,9],ymm4[10],ymm2[11,12,13],ymm4[14],ymm2[15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm1, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm1, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 416(%rdi), %ymm20
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 384(%rdi), %ymm21
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm12
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm12
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u],zero,zero,zero,xmm6[6,13],zero,zero,xmm6[2,9]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm12[4,11],zero,zero
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm12, %xmm6
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm4
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u],zero,zero,zero,xmm5[6,13],zero,zero,xmm5[2,9]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm4[4,11],zero,zero
+; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm24, %ymm6
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm29, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm24, %ymm4
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm23, %zmm30, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm23, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm26, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[6,13],zero,zero,xmm5[2,9,u,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm10, %ymm22, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm0[2],ymm5[3,4,5],ymm0[6],ymm5[7,8,9],ymm0[10],ymm5[11,12,13],ymm0[14],ymm5[15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm5
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm11, %ymm18, %ymm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm1[2],ymm5[3,4,5],ymm1[6],ymm5[7,8,9],ymm1[10],ymm5[11,12,13],ymm1[14],ymm5[15]
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm18, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm2[u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm16, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm2[u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm4[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm12[0],xmm6[0],xmm12[1],xmm6[1],xmm12[2],xmm6[2],xmm12[3],xmm6[3]
-; AVX512DQ-SLOW-NEXT:    vpermt2d %ymm6, %ymm25, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm11[6,13,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm13[1,8,15],zero,zero,xmm13[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm12, %xmm6
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm2, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm28, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm5
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u],zero,zero,xmm6[2,9],zero,zero,zero,xmm6[5,12,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,4,11],zero,zero,xmm5[0,7,14],zero,zero,xmm5[u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm6[2,3,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1,2],ymm12[3],ymm6[4,5],ymm12[6],ymm6[7,8,9,10],ymm12[11],ymm6[12,13],ymm12[14],ymm6[15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm5, %ymm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm5[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm5[5,12],zero,zero
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u],zero,zero,xmm5[0,7,14],zero,zero,xmm5[3,10]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm12, %xmm5
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm6, %ymm24, %ymm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm26
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm29, %zmm26
+; AVX512DQ-SLOW-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm6[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm7[6,13,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm12[1,8,15],zero,zero,xmm12[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm4, %xmm2
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm22, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm2
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u],zero,zero,xmm4[2,9],zero,zero,zero,xmm4[5,12,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,4,11],zero,zero,xmm2[0,7,14],zero,zero,xmm2[u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm4, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7,8,9,10],ymm5[11],ymm4[12,13],ymm5[14],ymm4[15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm2, %ymm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm23, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm2[2,9],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm2[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm2[5,12],zero,zero
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10]
 ; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm10, %ymm22, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1,2],ymm0[3],ymm5[4,5],ymm0[6],ymm5[7,8,9,10],ymm0[11],ymm5[12,13],ymm0[14],ymm5[15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm5, %ymm2, %ymm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm30
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm31, %ymm2
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm12
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u],zero,zero,zero,xmm12[5,12],zero,zero,xmm12[1,8,15,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,0,7,14],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm12, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm7[u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm4[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3]
-; AVX512DQ-SLOW-NEXT:    vpermt2d %ymm5, %ymm25, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm13[2,9],zero,zero,zero,xmm13[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = zero,zero,xmm11[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm12, %xmm5
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm5, %zmm2, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm6, %zmm28, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm6
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm12
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,u,u],zero,zero,xmm12[3,10],zero,zero,zero,xmm12[6,13,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,5,12],zero,zero,xmm6[1,8,15],zero,zero,xmm6[u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm12, %xmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm12
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm15, %ymm17, %ymm12
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm12[2,3,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm12[1,2],ymm2[3],ymm12[4,5,6],ymm2[7,8],ymm12[9,10],ymm2[11],ymm12[12,13,14],ymm2[15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm6, %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm6[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm6[6,13],zero,zero
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u],zero,zero,xmm6[1,8,15],zero,zero,xmm6[4,11]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm12, %xmm6
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm24, %ymm6
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm25
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm29, %zmm25
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm23, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm2[3,10],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm10, %ymm22, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm0[0],ymm5[1,2],ymm0[3],ymm5[4,5,6],ymm0[7,8],ymm5[9,10],ymm0[11],ymm5[12,13,14],ymm0[15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm30, %ymm2, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm31, %ymm2
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,zero,xmm6[6,13],zero,zero,xmm6[2,9,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[5,12]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm4[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm12, %xmm6
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm13[3,10],zero,zero,zero,xmm13[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = zero,zero,xmm11[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm12, %xmm2
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm6, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm5, %zmm28, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,6,13],zero,zero,xmm5[2,9],zero,zero,zero,xmm5[u,u]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u],zero,zero,xmm5[4,11],zero,zero,xmm5[0,7,14,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm15, %ymm17, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm6[2,3,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm12[0],ymm6[1,2,3],ymm12[4],ymm6[5,6],ymm12[7,8],ymm6[9,10,11],ymm12[12],ymm6[13,14],ymm12[15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm5, %ymm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm5
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm12
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,u,u,u,u],zero,zero,xmm12[2,9],zero,zero,zero,xmm12[5,12]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,4,11],zero,zero,xmm5[0,7,14],zero,zero
-; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm12, %xmm5
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm6, %ymm24, %ymm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm28
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm29, %zmm28
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm19, %ymm14, %ymm2
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,0,7,14],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm5
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm4, %ymm24, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm22
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm30, %zmm22
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm26, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[2,9],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[0,7,14],zero,zero,xmm0[3,10,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm11, %ymm18, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8,9,10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %ymm28, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm2, %ymm0, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm23
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm31, %ymm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm6[u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm3[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm12[2,9],zero,zero,zero,xmm12[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,xmm7[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm5, %xmm2
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm4, %zmm28, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm4
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[6,13,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,5,12],zero,zero,xmm4[1,8,15],zero,zero,xmm4[u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm5
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm15, %ymm17, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm5[2,3,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3],ymm6[4],ymm5[5,6,7,8],ymm6[9],ymm5[10,11],ymm6[12],ymm5[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm2, %ymm5
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm5[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm5[1,2],ymm2[3],ymm5[4,5,6],ymm2[7,8],ymm5[9,10],ymm2[11],ymm5[12,13,14],ymm2[15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm4, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm4[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm4[6,13],zero,zero
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u],zero,zero,xmm4[1,8,15],zero,zero,xmm4[4,11]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm24, %ymm4
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm25
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm30, %zmm25
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm8, %ymm26, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[1,8,15],zero,zero,xmm0[4,11,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm11, %ymm18, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1,2],ymm1[3],ymm2[4,5,6],ymm1[7,8],ymm2[9,10],ymm1[11],ymm2[12,13,14],ymm1[15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm23, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm31, %ymm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,zero,xmm4[6,13],zero,zero,xmm4[2,9,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm4, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm6[5,12]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm3[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm3, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm5
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm23, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm12[3,10],zero,zero,zero,xmm12[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm7[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm4, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm28, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm14, %ymm19, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm4, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm15, %ymm17, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1,2,3],ymm5[4],ymm4[5,6],ymm5[7,8],ymm4[9,10,11],ymm5[12],ymm4[13,14],ymm5[15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm2, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm2
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u],zero,zero,xmm6[3,10],zero,zero,zero,xmm6[6,13]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15],zero,zero
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, %xmm2, %ymm0, %ymm27
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm24, %ymm27
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm23, %ymm8, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,xmm5[2,9],zero,zero,zero,xmm5[5,12,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,11],zero,zero,xmm2[0,7,14],zero,zero,xmm2[u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vporq %xmm5, %xmm2, %xmm29
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm19, %ymm14, %ymm2
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,zero,xmm5[6,13],zero,zero,xmm5[2,9,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u],zero,zero,xmm5[2,9],zero,zero,zero,xmm5[5,12]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,4,11],zero,zero,xmm2[0,7,14],zero,zero
 ; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm5[2,3,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7,8],ymm6[9],ymm5[10,11,12],ymm6[13],ymm5[14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm2[u,u,u,u,u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, %xmm2, %ymm0, %ymm30
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm5, %ymm24, %ymm30
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm31, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm2[u,u,2,9],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vporq %xmm5, %xmm2, %xmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm16
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm23, %ymm8, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm3, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm23, %ymm8, %ymm3
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[6,13]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm4[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
-; AVX512DQ-SLOW-NEXT:    vpor %xmm8, %xmm12, %xmm8
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm10, %ymm22, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm4, %ymm24, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm28
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm30, %zmm28
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm19, %ymm14, %ymm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm15, %ymm17, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6,7,8],ymm4[9],ymm2[10,11],ymm4[12],ymm2[13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm27, %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero
+; AVX512DQ-SLOW-NEXT:    vpor %xmm4, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, %xmm0, %ymm0, %ymm27
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm24, %ymm27
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm26, %ymm8, %ymm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm19, %ymm14, %ymm0
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[2,9,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm17, %ymm15, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3,4],ymm4[5],ymm2[6,7,8],ymm4[9],ymm2[10,11,12],ymm4[13],ymm2[14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm21, %ymm20, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm0[u,u,u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm4, %xmm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, %xmm0, %ymm0, %ymm30
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm24, %ymm30
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm31, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[u,u,2,9],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u],zero,zero,xmm0[0,7,14],zero,zero,xmm0[3,10,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vporq %xmm2, %xmm0, %xmm24
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm20, %ymm21, %ymm29
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm9, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm26, %ymm8, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm13, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm26, %ymm8, %ymm13
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm6, %xmm7
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm6[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm6[6,13]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm1, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm1[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
+; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm8, %xmm5
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm11, %ymm18, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %ymm19, %ymm9, %ymm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm12
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm22, %ymm10, %ymm12
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm22, %ymm10, %ymm9
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm10
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,xmm10[3,10],zero,zero,zero,xmm10[6,13,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[5,12],zero,zero,xmm5[1,8,15],zero,zero,xmm5[u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm10, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0],ymm2[1,2,3],ymm0[4],ymm2[5,6],ymm0[7,8],ymm2[9,10,11],ymm0[12],ymm2[13,14],ymm0[15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm19, %ymm29, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, %xmm24, %ymm0, %ymm10
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm10, %ymm20, %ymm8
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm12[0],ymm0[1],ymm12[2,3],ymm0[4],ymm12[5,6,7,8],ymm0[9],ymm12[10,11],ymm0[12],ymm12[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm10[4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm19, %ymm5, %ymm10
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm3[6,13],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm10, %ymm8
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm11, %ymm8
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm11, %ymm9
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1,2,3],ymm1[4],ymm2[5,6],ymm1[7,8],ymm2[9,10,11],ymm1[12],ymm2[13,14],ymm1[15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[3,10,1,8,15,6,13,20,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm3, %ymm2
+; AVX512DQ-SLOW-NEXT:    vinserti32x4 $1, %xmm24, %ymm0, %ymm11
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm11, %ymm23, %ymm5
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm11
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[3,10],zero,zero,zero,xmm11[6,13,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,12],zero,zero,xmm0[1,8,15],zero,zero,xmm0[u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm0, %xmm11, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm1[1],ymm8[2,3],ymm1[4],ymm8[5,6,7,8],ymm1[9],ymm8[10,11],ymm1[12],ymm8[13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm0, %ymm8
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm13[6,13],zero,zero,xmm13[2,9],zero,zero,zero,xmm13[u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm3
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm5, %xmm3, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm9[0],ymm0[1],ymm9[2,3,4],ymm0[5],ymm9[6,7,8],ymm0[9],ymm9[10,11,12],ymm0[13],ymm9[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm19, %ymm3, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm5, %xmm13, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm11[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm12[0],xmm3[1],xmm12[1],xmm3[2],xmm12[2],xmm3[3],xmm12[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm8, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm8, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm18, %ymm31, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm6[u,u,3,10],zero,zero,zero,xmm6[6,13],zero,zero,xmm6[u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[1,8,15],zero,zero,xmm6[4,11,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm6, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %ymm17, %ymm1, %ymm15
-; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm18, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm4[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm7[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm7[0,7,14]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm6, %xmm12, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm9[0],ymm1[1],ymm9[2,3,4],ymm1[5],ymm9[6,7,8],ymm1[9],ymm9[10,11,12],ymm1[13],ymm9[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[5,12,3,10,1,8,15,22,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm18, %ymm0, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm12, %xmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm9[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm5, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm18, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm16, %ymm31, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm4[u,u,3,10],zero,zero,zero,xmm4[6,13],zero,zero,xmm4[u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,xmm4[1,8,15],zero,zero,xmm4[4,11,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm4, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %ymm17, %ymm10, %ymm15
+; AVX512DQ-SLOW-NEXT:    vpternlogq $202, %ymm31, %ymm16, %ymm10
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm7[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm7[0,7,14]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm4, %xmm13, %xmm4
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm20, %ymm6
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm2, %ymm23, %ymm4
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,4,11],zero,zero,xmm1[0,7,14],zero,zero,xmm1[u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm10[u,u,4,11],zero,zero,xmm10[0,7,14],zero,zero,xmm10[u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm4[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm6[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
 ; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm7[1,8,15]
 ; AVX512DQ-SLOW-NEXT:    vpor %xmm2, %xmm7, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm20, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm11[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm13[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm23, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm9[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm12[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm6, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm10, %zmm8, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm5, %xmm11, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm13[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm8, %zmm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm8, %zmm18, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm11, %xmm9, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm12[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm18, %zmm2
 ; AVX512DQ-SLOW-NEXT:    movw $-512, %ax # imm = 0xFE00
 ; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm27, %zmm0, %zmm3 {%k1}
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm27, %zmm0, %zmm0 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm30, %zmm0, %zmm1 {%k1}
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm14[u,u,u,u,2,9],zero,zero,zero,xmm14[5,12],zero,zero,xmm14[u,u,u]
-; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,xmm5[0,7,14],zero,zero,xmm5[3,10,u,u,u]
-; AVX512DQ-SLOW-NEXT:    vpor %xmm0, %xmm5, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm15[2,3,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm15[0,1],ymm5[2],ymm15[3,4],ymm5[5],ymm15[6,7,8,9],ymm5[10],ymm15[11,12],ymm5[13],ymm15[14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[1,8,15,22,29,20,27,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm4
-; AVX512DQ-SLOW-NEXT:    vextracti32x4 $1, %ymm16, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[1,8,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,0,7,14],zero,zero,xmm4[3,10],zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpor %xmm0, %xmm4, %xmm0
-; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0,1,2],ymm0[3,4,5,6,7],ymm5[8,9,10],ymm0[11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm2 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vmovaps %zmm0, (%rsi)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm26, (%rdx)
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm14[u,u,u,u,2,9],zero,zero,zero,xmm14[5,12],zero,zero,xmm14[u,u,u]
+; AVX512DQ-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u],zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10,u,u,u]
+; AVX512DQ-SLOW-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm15[2,3,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm15[0,1],ymm4[2],ymm15[3,4],ymm4[5],ymm15[6,7,8,9],ymm4[10],ymm15[11,12],ymm4[13],ymm15[14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[1,8,15,22,29,20,27,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm5
+; AVX512DQ-SLOW-NEXT:    vextracti32x4 $1, %ymm29, %xmm3
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u],zero,zero,zero,xmm3[5,12],zero,zero,xmm3[1,8,15]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,0,7,14],zero,zero,xmm5[3,10],zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpor %xmm3, %xmm5, %xmm3
+; AVX512DQ-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm3, %zmm0, %zmm2 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vmovaps %zmm3, (%rsi)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm22, (%rdx)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm25, (%rcx)
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm28, (%r8)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, (%r9)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, (%r9)
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
@@ -10110,587 +10150,597 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ;
 ; AVX512DQ-FAST-LABEL: load_i8_stride7_vf64:
 ; AVX512DQ-FAST:       # %bb.0:
+; AVX512DQ-FAST-NEXT:    pushq %rax
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rdi), %ymm16
-; AVX512DQ-FAST-NEXT:    vmovdqa64 32(%rdi), %ymm26
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm8
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 64(%rdi), %ymm24
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm15, %ymm1
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm26, %ymm16, %ymm1
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm8, %ymm16, %ymm1
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,7,14],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpor %xmm2, %xmm1, %xmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %ymm10
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %ymm11
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm24, %ymm10, %ymm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm24, %ymm11, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa 80(%rdi), %xmm0
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7,8,9],ymm0[10],ymm2[11,12],ymm0[13],ymm2[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm0, %ymm9
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm0, %ymm7
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,4,11,2,9,16,23,30,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 128(%rdi), %ymm25
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 160(%rdi), %ymm31
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm12, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm25, %ymm31, %ymm1
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[6,13,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,1,2,4,6>
+; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,1,2,4,6>
 ; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rdi), %ymm1
-; AVX512DQ-FAST-NEXT:    vpermd %ymm1, %ymm3, %ymm3
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,23,26,29]
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,1,2,15,0,1,2,15]
-; AVX512DQ-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm8, %ymm3, %ymm12
-; AVX512DQ-FAST-NEXT:    vmovdqa 240(%rdi), %xmm8
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = zero,zero,zero,xmm8[5,12,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vmovdqa 224(%rdi), %xmm7
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm7[0,7,14],zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm13, %xmm14, %xmm13
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm13, %zmm12, %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm20, %zmm21
+; AVX512DQ-FAST-NEXT:    vpermd %ymm1, %ymm6, %ymm6
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,23,26,29]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm6[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa 240(%rdi), %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,zero,xmm0[5,12,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa 224(%rdi), %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm0[0,7,14],zero,zero,xmm0[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm0, %xmm23
+; AVX512DQ-FAST-NEXT:    vpor %xmm10, %xmm13, %xmm10
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm3, %zmm20
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm21, %zmm20
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 288(%rdi), %ymm17
-; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm12
+; AVX512DQ-FAST-NEXT:    vmovdqa 256(%rdi), %ymm10
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm12, %ymm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm10, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm2[u,u,u,u,u,3,10],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[u,u]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11,u,u]
 ; AVX512DQ-FAST-NEXT:    vpor %xmm2, %xmm13, %xmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 352(%rdi), %ymm29
 ; AVX512DQ-FAST-NEXT:    vmovdqa 320(%rdi), %ymm13
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm14
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm12, %ymm14
 ; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm29, %ymm13, %ymm14
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm14[2,3,0,1]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm14[0,1],ymm5[2],ymm14[3,4,5],ymm5[6],ymm14[7,8,9],ymm5[10],ymm14[11,12,13],ymm5[14],ymm14[15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm14[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm14[0,1],ymm3[2],ymm14[3,4,5],ymm3[6],ymm14[7,8,9],ymm3[10],ymm14[11,12,13],ymm3[14],ymm14[15]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm27, %ymm2, %ymm5
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm27, %ymm2, %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,65535]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 416(%rdi), %ymm18
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 384(%rdi), %ymm19
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm18, %ymm19, %ymm2
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm6
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u],zero,zero,zero,xmm6[6,13],zero,zero,xmm6[2,9]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u],zero,zero,zero,xmm5[6,13],zero,zero,xmm5[2,9]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm2[4,11],zero,zero
-; AVX512DQ-FAST-NEXT:    vpor %xmm6, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm2, %xmm2
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm22, %ymm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm3, %ymm22, %ymm2
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm21, %zmm30, %zmm0
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm20, %zmm30, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm26, %ymm16, %ymm2
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm6
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[6,13],zero,zero,xmm6[2,9,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm8, %ymm16, %ymm2
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[6,13],zero,zero,xmm3[2,9,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm6, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm15, %ymm6
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm24, %ymm6
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm9[2],ymm6[3,4,5],ymm9[6],ymm6[7,8,9],ymm9[10],ymm6[11,12,13],ymm9[14],ymm6[15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm6
+; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm15, %ymm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm24, %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm7[2],ymm3[3,4,5],ymm7[6],ymm3[7,8,9],ymm7[10],ymm3[11,12,13],ymm7[14],ymm3[15]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm25, %ymm31, %ymm2
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm2[u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm2[u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm0, %xmm2, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,1,3,4,6>
-; AVX512DQ-FAST-NEXT:    vpermd %ymm1, %ymm2, %ymm2
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,17,20,27,30]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm2, %ymm3, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm8[6,13,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm7[1,8,15],zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm2, %xmm5, %xmm2
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm20, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm12, %ymm17, %ymm2
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u],zero,zero,xmm5[2,9],zero,zero,zero,xmm5[5,12,u,u]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,4,11],zero,zero,xmm2[0,7,14],zero,zero,xmm2[u,u]
 ; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,1,3,4,6>
+; AVX512DQ-FAST-NEXT:    vpermd %ymm1, %ymm5, %ymm5
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,17,20,27,30]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5,6],ymm5[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm6, %xmm9
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm6[6,13,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm23, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[1,8,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm6, %xmm5
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm2, %zmm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm21, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm17, %ymm3
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u],zero,zero,xmm5[2,9],zero,zero,zero,xmm5[5,12,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,4,11],zero,zero,xmm3[0,7,14],zero,zero,xmm3[u,u]
+; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm3, %xmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm5
 ; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm29, %ymm13, %ymm5
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm5[2,3,0,1]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7,8,9,10],ymm6[11],ymm5[12,13],ymm6[14],ymm5[15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm27, %ymm2, %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm18, %ymm19, %ymm2
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm2[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm2[5,12],zero,zero
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm27, %ymm3, %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm12, %ymm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm18, %ymm19, %ymm3
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm3[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm3[5,12],zero,zero
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,xmm3[0,7,14],zero,zero,xmm3[3,10]
+; AVX512DQ-FAST-NEXT:    vpor %xmm6, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm22, %ymm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm20
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm30, %zmm20
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm12, %ymm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm8, %ymm16, %ymm2
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm2[2,9],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10]
-; AVX512DQ-FAST-NEXT:    vpor %xmm6, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm24, %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm7[3],ymm3[4,5],ymm7[6],ymm3[7,8,9,10],ymm7[11],ymm3[12,13],ymm7[14],ymm3[15]
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[1,8,15,6,13,4,11,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm21, %ymm2, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm15, %ymm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm31, %ymm25, %ymm2
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u],zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,0,7,14],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm2, %xmm2
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm22, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm20
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm30, %zmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm26, %ymm16, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[2,9],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[0,7,14],zero,zero,xmm0[3,10,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm24, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm9[3],ymm2[4,5],ymm9[6],ymm2[7,8,9,10],ymm9[11],ymm2[12,13],ymm9[14],ymm2[15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm2, %ymm0, %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm6
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm15, %ymm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm31, %ymm25, %ymm0
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,1,3,5,6>
-; AVX512DQ-FAST-NEXT:    vpermd %ymm1, %ymm2, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,1,3,5,6>
+; AVX512DQ-FAST-NEXT:    vpermd %ymm1, %ymm5, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,21,24,31]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm3, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm7[2,9],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm8[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpor %xmm1, %xmm2, %xmm1
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[2,9],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,xmm9[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpor %xmm2, %xmm5, %xmm2
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm1, %zmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm28, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm1
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm12, %ymm17, %ymm1
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[6,13,u,u]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm3
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm13, %ymm29, %ymm3
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm3[2,3,0,1]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0],ymm3[1,2],ymm5[3],ymm3[4,5,6],ymm5[7,8],ymm3[9,10],ymm5[11],ymm3[12,13,14],ymm5[15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm27, %ymm1, %ymm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm28, %zmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm12, %ymm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm17, %ymm3
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[6,13,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,5,12],zero,zero,xmm3[1,8,15],zero,zero,xmm3[u,u]
+; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm5
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm13, %ymm29, %ymm5
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm5[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1,2],ymm6[3],ymm5[4,5,6],ymm6[7,8],ymm5[9,10],ymm6[11],ymm5[12,13,14],ymm6[15]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm27, %ymm3, %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm18, %ymm19, %ymm3
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm3[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm3[6,13],zero,zero
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,xmm3[1,8,15],zero,zero,xmm3[4,11]
+; AVX512DQ-FAST-NEXT:    vpor %xmm6, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm22, %ymm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm23
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm30, %zmm23
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm1
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm18, %ymm19, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm1[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm1[6,13],zero,zero
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm8, %ymm16, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[3,10],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u],zero,zero,xmm1[1,8,15],zero,zero,xmm1[4,11]
-; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm3, %ymm22, %ymm1
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm23
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm30, %zmm23
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm26, %ymm16, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[1,8,15],zero,zero,xmm0[4,11,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm1
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm24, %ymm1
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm9[0],ymm1[1,2],ymm9[3],ymm1[4,5,6],ymm9[7,8],ymm1[9,10],ymm9[11],ymm1[12,13,14],ymm9[15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm6, %ymm0, %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm31, %ymm25, %ymm0
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[2,9,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[1,8,15],zero,zero,xmm1[4,11,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm12, %ymm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm24, %ymm3
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm7[0],ymm3[1,2],ymm7[3],ymm3[4,5,6],ymm7[7,8],ymm3[9,10],ymm7[11],ymm3[12,13,14],ymm7[15]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[2,9,0,7,14,5,12,19,26],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm21, %ymm1, %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm1
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm31, %ymm25, %ymm1
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,zero,xmm3[6,13],zero,zero,xmm3[2,9,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,1,8,15],zero,zero,xmm1[4,11],zero,zero,xmm1[u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa 208(%rdi), %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm1[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm1[5,12]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm1[5,12]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 192(%rdi), %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm3[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
-; AVX512DQ-FAST-NEXT:    vpor %xmm6, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm6
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm3[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
+; AVX512DQ-FAST-NEXT:    vpor %xmm2, %xmm7, %xmm2
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm21, %ymm2
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[3,10],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm8[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm6, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm28, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm12, %ymm17, %ymm2
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm2[u,u,u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm26, %ymm2
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[3,10],zero,zero,zero,xmm0[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = zero,zero,xmm9[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpor %xmm6, %xmm7, %xmm6
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm2, %zmm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm28, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm5
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm17, %ymm5
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,6,13],zero,zero,xmm5[2,9],zero,zero,zero,xmm5[u,u]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u],zero,zero,xmm5[4,11],zero,zero,xmm5[0,7,14,u,u]
+; AVX512DQ-FAST-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm12, %ymm6
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm13, %ymm29, %ymm6
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0],ymm6[1,2,3],ymm7[4],ymm6[5,6],ymm7[7,8],ymm6[9,10,11],ymm7[12],ymm6[13,14],ymm7[15]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm6[5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm27, %ymm5, %ymm6
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm5
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm19, %ymm18, %ymm5
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm7
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u,u,u,u,u],zero,zero,xmm7[2,9],zero,zero,zero,xmm7[5,12]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,4,11],zero,zero,xmm5[0,7,14],zero,zero
+; AVX512DQ-FAST-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm22, %ymm5
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm28
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm30, %zmm28
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm15, %ymm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm10, %ymm2
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,0,7,14],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[u,u]
 ; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm5
 ; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm13, %ymm29, %ymm5
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm5[2,3,0,1]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1,2,3],ymm6[4],ymm5[5,6],ymm6[7,8],ymm5[9,10,11],ymm6[12],ymm5[13,14],ymm6[15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3],ymm6[4],ymm5[5,6,7,8],ymm6[9],ymm5[10,11],ymm6[12],ymm5[13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm27, %ymm2, %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm12, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm19, %ymm18, %ymm2
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm6
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u],zero,zero,xmm6[2,9],zero,zero,zero,xmm6[5,12]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,4,11],zero,zero,xmm2[0,7,14],zero,zero
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,u,u,u,u,u],zero,zero,xmm6[3,10],zero,zero,zero,xmm6[6,13]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,5,12],zero,zero,xmm2[1,8,15],zero,zero
 ; AVX512DQ-FAST-NEXT:    vpor %xmm6, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm22, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm28
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm30, %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm15, %ymm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm12, %ymm0
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15,u,u]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm2, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, %xmm2, %ymm0, %ymm27
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm22, %ymm27
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm16, %ymm8, %ymm2
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,xmm5[2,9],zero,zero,zero,xmm5[5,12,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,11],zero,zero,xmm2[0,7,14],zero,zero,xmm2[u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm2, %xmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm10, %ymm2
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,zero,xmm5[6,13],zero,zero,xmm5[2,9,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,1,8,15],zero,zero,xmm2[4,11],zero,zero,xmm2[u,u,u]
+; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm15, %ymm5
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm29, %ymm13, %ymm5
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm5[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm6[1],ymm5[2,3,4],ymm6[5],ymm5[6,7,8],ymm6[9],ymm5[10,11,12],ymm6[13],ymm5[14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm5
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm13, %ymm29, %ymm2
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm2[2,3,0,1]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[2,3],ymm5[4],ymm2[5,6,7,8],ymm5[9],ymm2[10,11],ymm5[12],ymm2[13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm27, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm19, %ymm18, %ymm0
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[6,13]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero
-; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, %xmm0, %ymm0, %ymm27
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm2, %ymm22, %ymm27
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm16, %ymm26, %ymm0
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm2, %xmm0, %xmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm17, %ymm12, %ymm0
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[2,9,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm15, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm29, %ymm13, %ymm2
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm2[2,3,0,1]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[2,3,4],ymm5[5],ymm2[6,7,8],ymm5[9],ymm2[10,11,12],ymm5[13],ymm2[14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm19, %ymm18, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm0[u,u,u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14]
-; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, %xmm0, %ymm0, %ymm30
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm2, %ymm22, %ymm30
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm31, %ymm25, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[u,u,2,9],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u],zero,zero,xmm0[0,7,14],zero,zero,xmm0[3,10,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vporq %xmm2, %xmm0, %xmm22
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm19, %ymm18, %ymm2
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm2[u,u,u,u,u,u,u,6,13],zero,zero,xmm2[2,9],zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14]
+; AVX512DQ-FAST-NEXT:    vpor %xmm6, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, %xmm2, %ymm0, %ymm30
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm22, %ymm30
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm12, %ymm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm31, %ymm25, %ymm2
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm2[u,u,2,9],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vporq %xmm5, %xmm2, %xmm22
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm18, %ymm19, %ymm15
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm11, %ymm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm16, %ymm26, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm5
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm16, %ymm26, %ymm4
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm1[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm1[6,13]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm12, %ymm5
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm16, %ymm8, %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm4, %ymm6
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm16, %ymm8, %ymm4
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm1[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm1[6,13]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm3[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
-; AVX512DQ-FAST-NEXT:    vpor %xmm6, %xmm8, %xmm6
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm10, %ymm24, %ymm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %ymm17, %ymm11, %ymm12
+; AVX512DQ-FAST-NEXT:    vpor %xmm7, %xmm8, %xmm7
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm11, %ymm24, %ymm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %ymm17, %ymm12, %ymm10
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm8
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm24, %ymm10, %ymm8
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm24, %ymm10, %ymm11
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm10
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,xmm10[3,10],zero,zero,zero,xmm10[6,13,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[5,12],zero,zero,xmm0[1,8,15],zero,zero,xmm0[u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vporq %xmm10, %xmm0, %xmm16
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm24, %ymm11, %ymm8
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm24, %ymm11, %ymm12
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm11
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[3,10],zero,zero,zero,xmm11[6,13,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[5,12],zero,zero,xmm5[1,8,15],zero,zero,xmm5[u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm11, %xmm5
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0],ymm2[1,2,3],ymm0[4],ymm2[5,6],ymm0[7,8],ymm2[9,10,11],ymm0[12],ymm2[13,14],ymm0[15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm17, %ymm9, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm10
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm10, %ymm21, %ymm6
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[3,10,1,8,15,6,13,20,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm21, %ymm9, %ymm2
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm11
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm11, %ymm26, %ymm7
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm0[1],ymm8[2,3],ymm0[4],ymm8[5,6,7,8],ymm0[9],ymm8[10,11],ymm0[12],ymm8[13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm0, %ymm10
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm17, %ymm16, %ymm8
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm4[6,13],zero,zero,xmm4[2,9],zero,zero,zero,xmm4[u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm8[4,11,2,9,0,7,14,21,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm21, %ymm5, %ymm8
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm4[6,13],zero,zero,xmm4[2,9],zero,zero,zero,xmm4[u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm4
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[4,11],zero,zero,xmm4[0,7,14,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm0, %xmm4, %xmm0
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm11[0],ymm10[1],ymm11[2,3,4],ymm10[5],ymm11[6,7,8],ymm10[9],ymm11[10,11,12],ymm10[13],ymm11[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm17, %ymm0, %ymm4
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm0
+; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm12[0],ymm0[1],ymm12[2,3,4],ymm0[5],ymm12[6,7,8],ymm0[9],ymm12[10,11,12],ymm0[13],ymm12[14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[5,12,3,10,1,8,15,22,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm21, %ymm4, %ymm5
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm9[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm6, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm6, %zmm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm31, %ymm25, %ymm5
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm5[u,u,3,10],zero,zero,zero,xmm5[6,13],zero,zero,xmm5[u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u],zero,zero,xmm5[1,8,15],zero,zero,xmm5[4,11,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm2, %xmm5, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm9, %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm0[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm7, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm7, %zmm4
+; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm31, %ymm25, %ymm6
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm6[u,u,3,10],zero,zero,zero,xmm6[6,13],zero,zero,xmm6[u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm6, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[1,8,15],zero,zero,xmm6[4,11,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpor %xmm2, %xmm6, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpternlogq $226, %ymm29, %ymm14, %ymm13
 ; AVX512DQ-FAST-NEXT:    vpternlogq $202, %ymm25, %ymm31, %ymm14
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm3[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm1[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm1[0,7,14]
-; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm11, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm3[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm1[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm1[0,7,14]
+; AVX512DQ-FAST-NEXT:    vpor %xmm6, %xmm12, %xmm6
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm2, %ymm21, %ymm5
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm2, %ymm26, %ymm6
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm14[u,u,4,11],zero,zero,xmm14[0,7,14],zero,zero,xmm14[u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm2, %xmm11, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm14[u,u,4,11],zero,zero,xmm14[0,7,14],zero,zero,xmm14[u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpor %xmm2, %xmm12, %xmm2
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm1[1,8,15]
 ; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm1
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
 ; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm2, %ymm21, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm9[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm7[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %ymm2, %ymm26, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm9[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm8, %zmm6, %zmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm10, %xmm9, %xmm3
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm7[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm6, %zmm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm8, %zmm7, %zmm2
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm9[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm6, %zmm1
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm7, %zmm1
 ; AVX512DQ-FAST-NEXT:    movw $-512, %ax # imm = 0xFE00
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm27, %zmm0, %zmm0 {%k1}
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm27, %zmm0, %zmm4 {%k1}
 ; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm30, %zmm0, %zmm2 {%k1}
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm12[u,u,u,u,2,9],zero,zero,zero,xmm12[5,12],zero,zero,xmm12[u,u,u]
-; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u],zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10,u,u,u]
-; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm4, %xmm3
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm13[2,3,0,1]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm13[0,1],ymm4[2],ymm13[3,4],ymm4[5],ymm13[6,7,8,9],ymm4[10],ymm13[11,12],ymm4[13],ymm13[14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[1,8,15,22,29,20,27,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm4
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm10[u,u,u,u,2,9],zero,zero,zero,xmm10[5,12],zero,zero,xmm10[u,u,u]
+; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,xmm5[0,7,14],zero,zero,xmm5[3,10,u,u,u]
+; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm5, %xmm3
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm13[2,3,0,1]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm13[0,1],ymm5[2],ymm13[3,4],ymm5[5],ymm13[6,7,8,9],ymm5[10],ymm13[11,12],ymm5[13],ymm13[14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[1,8,15,22,29,20,27,18,25],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm5
 ; AVX512DQ-FAST-NEXT:    vextracti128 $1, %ymm15, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u],zero,zero,zero,xmm3[5,12],zero,zero,xmm3[1,8,15]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm15[u,u,u,u,u,u,0,7,14],zero,zero,xmm15[3,10],zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm5, %xmm3
-; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm3, %zmm0, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovaps %zmm3, (%rsi)
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm15[u,u,u,u,u,u,0,7,14],zero,zero,xmm15[3,10],zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0,1,2],ymm0[3,4,5,6,7],ymm5[8,9,10],ymm0[11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovaps %zmm0, (%rsi)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, (%rdx)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, (%rcx)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm28, (%r8)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, (%r9)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, (%r9)
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
+; AVX512DQ-FAST-NEXT:    popq %rax
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;
 ; AVX512BW-ONLY-SLOW-LABEL: load_i8_stride7_vf64:
 ; AVX512BW-ONLY-SLOW:       # %bb.0:
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm27
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <8,1,18,11,4,5,22,15,u,25,10,u,12,29,14,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm27, %zmm0, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <0,1,18,11,4,21,14,7,8,25,10,u,28,13,u,15>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm27, %zmm0, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <0,17,10,3,4,21,14,7,24,9,u,11,28,13,u,31>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm27, %zmm0, %zmm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <16,17,10,3,20,13,6,23,24,25,u,27,28,u,30,31>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm27, %zmm0, %zmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm28
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm24
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <8,1,18,11,4,5,22,15,u,25,10,u,12,29,14,u>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm24, %zmm1, %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,1,18,11,4,21,14,7,8,25,10,u,28,13,u,15>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm24, %zmm1, %zmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,17,10,3,4,21,14,7,24,9,u,11,28,13,u,31>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm24, %zmm1, %zmm11
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <16,17,10,3,20,13,6,23,24,25,u,27,28,u,30,31>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm24, %zmm1, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm6
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm2
 ; AVX512BW-ONLY-SLOW-NEXT:    movw $-28382, %ax # imm = 0x9122
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm2, %ymm28, %ymm0 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm2, %ymm6, %ymm1 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %k1, %k2
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm5, %xmm0, %xmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[5,12],zero,zero,xmm4[1,8,15,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,7,14],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm4, %xmm1, %xmm25
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm4, %ymm26 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm3, %ymm25 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm8
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm5
 ; AVX512BW-ONLY-SLOW-NEXT:    movw $8772, %ax # imm = 0x2244
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k7
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm6, %ymm5, %ymm4 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u],zero,zero,xmm7[3,10],zero,zero,zero,xmm7[6,13,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,5,12],zero,zero,xmm4[1,8,15],zero,zero,xmm4[u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm7, %xmm4, %xmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm10
+; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k6
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm8, %ymm5, %ymm3 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,5,12],zero,zero,xmm3[1,8,15],zero,zero,xmm3[u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm7
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm21 = <u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm21, %xmm10, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm11[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm22 = [0,1,2,11,0,1,2,11]
-; AVX512BW-ONLY-SLOW-NEXT:    # ymm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %ymm4, %ymm22, %ymm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm4[5,12,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm7[0,7,14],zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm9, %xmm12, %xmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm9, %zmm8, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm21, %xmm7, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm3[0,1,2,3,4,5,6],ymm4[7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 240(%rdi), %xmm26
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm26[5,12,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm4[0,7,14],zero,zero,xmm4[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm12, %xmm13, %xmm12
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm12, %zmm10, %zmm10
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $137438429184, %rax # imm = 0x1FFFF80000
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm8, %zmm26 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm9
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm8
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm10, %zmm25 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm13
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm12
 ; AVX512BW-ONLY-SLOW-NEXT:    movw $9288, %ax # imm = 0x2448
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k4
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm9, %ymm8, %ymm12 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm12[u,u,u,u,u,3,10],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm12
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,u,u],zero,zero,xmm12[1,8,15],zero,zero,xmm12[4,11,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm13, %xmm12, %xmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm13
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm14
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm13, %ymm14, %ymm12 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm12[2,3,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm15[2],ymm12[3,4,5],ymm15[6],ymm12[7,8,9],ymm15[10],ymm12[11,12,13],ymm15[14],ymm12[15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    movw $3968, %ax # imm = 0xF80
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm12, %ymm19 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 416(%rdi), %ymm15
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm12
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm13, %ymm12, %ymm10 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm10[u,u,u,u,u,3,10],zero,zero,zero,xmm10[6,13],zero,zero,xmm10[u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm10
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[u,u,u,u,u],zero,zero,xmm10[1,8,15],zero,zero,xmm10[4,11,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm14, %xmm10, %xmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 352(%rdi), %ymm16
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm15
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm16, %ymm15, %ymm10 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm10[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm14[2],ymm10[3,4,5],ymm14[6],ymm10[7,8,9],ymm14[10],ymm10[11,12,13],ymm14[14],ymm10[15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    movw $3968, %ax # imm = 0xF80
+; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm10, %ymm19 {%k7}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 416(%rdi), %ymm14
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm10
 ; AVX512BW-ONLY-SLOW-NEXT:    movw $4644, %ax # imm = 0x1224
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k6
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm15, %ymm12, %ymm20 {%k6}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm20, %xmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm23[u,u,u,u,u,u,u],zero,zero,zero,xmm23[6,13],zero,zero,xmm23[2,9]
+; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k4
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm14, %ymm10, %ymm20 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm20, %xmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,u,u,u,u,u,u],zero,zero,zero,xmm22[6,13],zero,zero,xmm22[2,9]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm20 = xmm20[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm20[4,11],zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm23, %xmm20, %xmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm22, %xmm20, %xmm20
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm20
 ; AVX512BW-ONLY-SLOW-NEXT:    movl $-8388608, %eax # imm = 0xFF800000
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm2, %ymm28, %ymm23 {%k6}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm23, %xmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm24 = zero,zero,zero,xmm24[6,13],zero,zero,xmm24[2,9,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm23[1,8,15],zero,zero,xmm23[4,11],zero,zero,xmm23[u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm24, %xmm23, %xmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm16 = ymm16[u,u,u,u,u,u,u,u,u,0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm2, %ymm6, %ymm22 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm22, %xmm23
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = zero,zero,zero,xmm23[6,13],zero,zero,xmm23[2,9,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[1,8,15],zero,zero,xmm22[4,11],zero,zero,xmm22[u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm23, %xmm22, %xmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    movl $511, %edi # imm = 0x1FF
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %edi, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm23, %ymm16 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm6, %ymm5, %ymm23 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm24 = xmm23[u,u,u,6,13],zero,zero,xmm23[2,9],zero,zero,zero,xmm23[u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm23, %xmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm23[u,u,u],zero,zero,xmm23[4,11],zero,zero,xmm23[0,7,14,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm24, %xmm23, %xmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm24 = xmm11[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm25 = xmm10[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm24 = xmm25[0],xmm24[0],xmm25[1],xmm24[1],xmm25[2],xmm24[2],xmm25[3],xmm24[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %ymm24, %ymm22, %ymm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm24 = zero,zero,zero,xmm4[6,13,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm25 = xmm7[1,8,15],zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm24, %xmm25, %xmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm24, %zmm23, %zmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm23, %zmm16 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm2, %ymm28, %ymm23 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm24 = xmm23[2,9],zero,zero,zero,xmm23[5,12],zero,zero,xmm23[u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm23, %xmm23
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = zero,zero,xmm23[0,7,14],zero,zero,xmm23[3,10,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm24, %xmm23, %xmm23
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm22, %ymm11 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm8, %ymm5, %ymm22 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm22[u,u,u,6,13],zero,zero,xmm22[2,9],zero,zero,zero,xmm22[u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm22, %xmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,u,u],zero,zero,xmm22[4,11],zero,zero,xmm22[0,7,14,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm23, %xmm22, %xmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm9[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm7[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm22 = xmm23[0],xmm22[0],xmm23[1],xmm22[1],xmm23[2],xmm22[2],xmm23[3],xmm22[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm26[6,13,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm4[1,8,15],zero,zero,xmm4[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm1, %xmm22, %xmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm11 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm2, %ymm6, %ymm0 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[2,9],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[0,7,14],zero,zero,xmm0[3,10,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    movl $261632, %edi # imm = 0x3FE00
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %edi, %k5
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm23 {%k5} = ymm17[u,u,u,u,u,u,u,u,u,1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm5, %ymm6, %ymm17 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm17, %xmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm24 = xmm24[u,u],zero,zero,zero,xmm24[5,12],zero,zero,xmm24[1,8,15,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,0,7,14],zero,zero,xmm17[3,10],zero,zero,zero,xmm17[u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm24, %xmm17, %xmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm21, %xmm11, %xmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm24 = xmm10[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm21 = xmm24[0],xmm21[0],xmm24[1],xmm21[1],xmm24[2],xmm21[2],xmm24[3],xmm21[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermt2d %ymm21, %ymm22, %ymm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm21 = xmm7[2,9],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = zero,zero,xmm4[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm21, %xmm22, %xmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm21, %zmm17, %zmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm23, %zmm17 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm2, %ymm28, %ymm21 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm21[3,10],zero,zero,zero,xmm21[6,13],zero,zero,xmm21[u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm21, %xmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm21 = zero,zero,xmm21[1,8,15],zero,zero,xmm21[4,11,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm22, %xmm21, %xmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm21 {%k5} = ymm18[u,u,u,u,u,u,u,u,u,2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm5, %ymm6, %ymm18 {%k6}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm18, %xmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,u],zero,zero,zero,xmm22[6,13],zero,zero,xmm22[2,9,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm18 = xmm18[u,u,1,8,15],zero,zero,xmm18[4,11],zero,zero,xmm18[u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm22, %xmm18, %xmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm18, %ymm0, %ymm18
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm11[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm11[5,12]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm10[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm22, %xmm23, %xmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm22
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 {%k5} = ymm17[u,u,u,u,u,u,u,u,u,1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm5, %ymm8, %ymm1 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm1, %xmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u],zero,zero,zero,xmm17[5,12],zero,zero,xmm17[1,8,15,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,0,7,14],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm17, %xmm1, %xmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm21, %xmm9, %xmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm21 = xmm7[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm17 = xmm21[0],xmm17[0],xmm21[1],xmm17[1],xmm21[2],xmm17[2],xmm21[3],xmm17[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm3[7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm4[2,9],zero,zero,zero,xmm4[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = zero,zero,xmm26[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm3, %xmm17, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm1, %zmm17
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm0, %zmm17 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm2, %ymm6, %ymm0 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[1,8,15],zero,zero,xmm0[4,11,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 {%k5} = ymm18[u,u,u,u,u,u,u,u,u,2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm5, %ymm8, %ymm1 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,zero,xmm3[6,13],zero,zero,xmm3[2,9,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,1,8,15],zero,zero,xmm1[4,11],zero,zero,xmm1[u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm9[5,12]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm18 = xmm7[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm3, %xmm18, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512BW-ONLY-SLOW-NEXT:    movl $-134217728, %edi # imm = 0xF8000000
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %edi, %k2
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm22, %ymm18 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm7[3,10],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = zero,zero,xmm4[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm22, %xmm23, %xmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm22, %zmm18, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm21, %zmm18 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm3, %ymm1 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm4[3,10],zero,zero,zero,xmm4[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm18 = zero,zero,xmm26[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm3, %xmm18, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm1, %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm0, %zmm18 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k2
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm20, %ymm19 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm0, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm0, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $-137438953472, %rax # imm = 0xFFFFFFE000000000
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm19, %zmm26 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm8, %ymm9, %ymm19 {%k6}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm19, %xmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm20 = xmm20[u,u,u,u,u],zero,zero,xmm20[2,9],zero,zero,zero,xmm20[5,12,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm19[u,u,u,u,u,4,11],zero,zero,xmm19[0,7,14],zero,zero,xmm19[u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm20, %xmm19, %xmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm13, %ymm14, %ymm0 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8,9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm0, %ymm19 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm15, %ymm12, %ymm0 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm0[5,12],zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[0,7,14],zero,zero,xmm0[3,10]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm25 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm12, %ymm13, %ymm0 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u],zero,zero,xmm1[2,9],zero,zero,zero,xmm1[5,12,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm19 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm0, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm16 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm8, %ymm9, %ymm0 {%k7}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm16, %ymm15, %ymm1 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3],ymm1[4,5],ymm3[6],ymm1[7,8,9,10],ymm3[11],ymm1[12,13],ymm3[14],ymm1[15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k7}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm14, %ymm10, %ymm1 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm1[5,12],zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u],zero,zero,xmm1[0,7,14],zero,zero,xmm1[3,10]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm11 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm12, %ymm13, %ymm0 {%k6}
 ; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[6,13,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero,xmm0[u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm14, %ymm13, %ymm1 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm15, %ymm16, %ymm1 {%k4}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1,2],ymm3[3],ymm1[4,5,6],ymm3[7,8],ymm1[9,10],ymm3[11],ymm1[12,13,14],ymm3[15]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm15, %ymm12, %ymm1 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k7}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm14, %ymm10, %ymm1 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm1[6,13],zero,zero
 ; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u],zero,zero,xmm1[1,8,15],zero,zero,xmm1[4,11]
@@ -10699,17 +10749,17 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k2}
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm17 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm8, %ymm9, %ymm0 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm12, %ymm13, %ymm0 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero,xmm0[u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm14, %ymm13, %ymm1 {%k7}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm15, %ymm16, %ymm1 {%k6}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1,2,3],ymm3[4],ymm1[5,6],ymm3[7,8],ymm1[9,10,11],ymm3[12],ymm1[13,14],ymm3[15]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm12, %ymm15, %ymm1 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k7}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm10, %ymm14, %ymm1 {%k4}
 ; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm3
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[5,12]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,4,11],zero,zero,xmm1[0,7,14],zero,zero
@@ -10719,173 +10769,174 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm18 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm9, %ymm8, %ymm0 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm13, %ymm12, %ymm0 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u],zero,zero,zero,xmm1[5,12],zero,zero,xmm1[1,8,15,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm1, %xmm0, %xmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm14, %ymm13, %ymm0 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm15, %ymm16, %ymm0 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6,7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14,15]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm0, %ymm19 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm9, %ymm8, %ymm0 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm0, %ymm19 {%k7}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm13, %ymm12, %ymm0 {%k4}
 ; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[2,9,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm13, %ymm14, %ymm1 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    kmovq %k1, %k3
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm16, %ymm15, %ymm1 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    kmovq %k1, %k7
 ; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3,4],ymm3[5],ymm1[6,7,8],ymm3[9],ymm1[10,11,12],ymm3[13],ymm1[14,15]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm20 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    movl $8176, %eax # imm = 0x1FF0
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm20 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm12, %ymm15, %ymm0 {%k7}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm10, %ymm14, %ymm0 {%k6}
 ; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[6,13]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero
 ; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm19 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm12, %ymm15, %ymm0 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm10, %ymm14, %ymm0 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero
 ; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm20 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm28, %ymm2, %ymm0 {%k6}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm28, %ymm2, %ymm1 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm5, %ymm6, %ymm3 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm28, %ymm2 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm5, %ymm6, %ymm21 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm21[u,u,2,9],zero,zero,zero,xmm21[5,12],zero,zero,xmm21[u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm21, %xmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[u,u],zero,zero,xmm21[0,7,14],zero,zero,xmm21[3,10,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm22, %xmm21, %xmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm21, %ymm0, %ymm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm11[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm11[6,13]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm10[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm22, %xmm23, %xmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm22
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm5, %ymm8, %ymm0 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm6, %ymm2, %ymm21 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm16, %ymm15 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm15[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm15[0,1],ymm1[2],ymm15[3,4],ymm1[5],ymm15[6,7,8,9],ymm1[10],ymm15[11,12],ymm1[13],ymm15[14,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm13, %ymm12 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm12[u,u,u,u,2,9],zero,zero,zero,xmm12[5,12],zero,zero,xmm12[u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm12
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,u],zero,zero,xmm12[0,7,14],zero,zero,xmm12[3,10,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm12, %xmm12
+; AVX512BW-ONLY-SLOW-NEXT:    movl $4186112, %eax # imm = 0x3FE000
+; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 {%k1} = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,1,8,15,22,29,20,27,18,25,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm14, %ymm10 {%k7}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm6, %ymm2, %ymm1 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm5, %ymm8, %ymm3 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm6, %ymm2 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm8, %ymm5 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm0[u,u,2,9],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u],zero,zero,xmm0[0,7,14],zero,zero,xmm0[3,10,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm0, %xmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm9[6,13]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm8, %xmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 4-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm22, %ymm21 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm3[u,u,3,10],zero,zero,zero,xmm3[6,13],zero,zero,xmm3[u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm6, %ymm0 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm3[u,u,3,10],zero,zero,zero,xmm3[6,13],zero,zero,xmm3[u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,xmm3[1,8,15],zero,zero,xmm3[4,11,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm22, %xmm3, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm3, %xmm3
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm10[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm11[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm11[0,7,14]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm22, %xmm23, %xmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm22
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm22, %ymm3 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm6, %ymm5 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm9[0,7,14]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm8, %xmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm6, %ymm3 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm6
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[2,9],zero,zero,zero,xmm6[5,12,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,4,11],zero,zero,xmm5[0,7,14],zero,zero,xmm5[u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm5, %xmm5
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm10[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm11[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm11[1,8,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm10, %xmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm9[1,8,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm7, %xmm6
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm6, %ymm5 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <16,9,2,19,20,13,6,23,24,u,26,27,28,u,30,31>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm27, %zmm6, %zmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = <16,9,2,19,12,5,22,23,24,u,26,27,u,29,30,31>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm27, %zmm10, %zmm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = <8,1,2,19,12,5,22,15,u,9,26,11,u,29,14,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm27, %zmm11, %zmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm0, %xmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = zero,zero,xmm22[2,9],zero,zero,zero,xmm22[5,12,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm22, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 {%k5} = ymm11[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm7, %xmm22
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm4[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm22 = xmm22[0],xmm23[0],xmm22[1],xmm23[1],xmm22[2],xmm23[2],xmm22[3],xmm23[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm21, %zmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm21, %zmm0 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm1, %xmm21
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm21 = zero,zero,xmm21[3,10],zero,zero,zero,xmm21[6,13,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[5,12],zero,zero,xmm1[1,8,15],zero,zero,xmm1[u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm21, %xmm1, %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 {%k5} = ymm10[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm4[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm21 = xmm7[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm21[0],xmm10[0],xmm21[1],xmm10[1],xmm21[2],xmm10[2],xmm21[3],xmm10[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm3, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm3, %zmm1 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm24, %zmm6, %zmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <16,9,2,19,12,5,22,23,24,u,26,27,u,29,30,31>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm24, %zmm7, %zmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = <8,1,2,19,12,5,22,15,u,9,26,11,u,29,14,u>
+; AVX512BW-ONLY-SLOW-NEXT:    vpermw %zmm24, %zmm8, %zmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,xmm9[2,9],zero,zero,zero,xmm9[5,12,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[4,11],zero,zero,xmm1[0,7,14],zero,zero,xmm1[u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm1, %xmm9, %xmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 {%k5} = ymm8[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm4, %xmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm26[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm0, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    movw $-512, %ax # imm = 0xFE00
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm21, %xmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,xmm9[3,10],zero,zero,zero,xmm9[6,13,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm21[5,12],zero,zero,xmm21[1,8,15],zero,zero,xmm21[u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm9, %xmm13, %xmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 {%k5} = ymm7[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm26[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm4[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm13[0],xmm7[0],xmm13[1],xmm7[1],xmm13[2],xmm7[2],xmm13[3],xmm7[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm3, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm3, %zmm9 {%k5}
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm2[6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm2, %xmm2
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 {%k5} = ymm6[u,u,u,u,u,u,u,u,u,5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm11, %xmm4, %xmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm7[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm26, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm5, %zmm3
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm3, %zmm2 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm13, %ymm14 {%k6}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm14[2,3,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm14[0,1],ymm3[2],ymm14[3,4],ymm3[5],ymm14[6,7,8,9],ymm3[10],ymm14[11,12],ymm3[13],ymm14[14,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm9, %ymm8 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm8[u,u,u,u,2,9],zero,zero,zero,xmm8[5,12],zero,zero,xmm8[u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,xmm5[0,7,14],zero,zero,xmm5[3,10,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm5, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    movl $4186112, %eax # imm = 0x3FE000
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 {%k1} = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,1,8,15,22,29,20,27,18,25,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    movw $-512, %ax # imm = 0xFE00
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm15, %ymm12 {%k3}
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa32 %zmm3, %zmm0 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm20, %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa32 %zmm3, %zmm1 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u],zero,zero,zero,xmm3[5,12],zero,zero,xmm3[1,8,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm12[u,u,u,u,u,u,0,7,14],zero,zero,xmm12[3,10],zero,zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm5, %xmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,1,2,3,4,5,6,7,8,9,10,35,36,37,38,39]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm3, %zmm4, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa32 %zmm3, %zmm2 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm20, %zmm0, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm9 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[1,8,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm10[u,u,u,u,u,u,0,7,14],zero,zero,xmm10[3,10],zero,zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm12[0,1,2],ymm0[3,4,5,6,7],ymm12[8,9,10],ymm0[11,12,13,14,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm12[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm2 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rdi
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm26, (%rsi)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, (%rdx)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm25, (%rsi)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, (%rdx)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, (%rcx)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, (%r8)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, (%r9)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, (%rdi)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, (%r9)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, (%rdi)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, (%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vzeroupper
 ; AVX512BW-ONLY-SLOW-NEXT:    retq
 ;
 ; AVX512BW-ONLY-FAST-LABEL: load_i8_stride7_vf64:
 ; AVX512BW-ONLY-FAST:       # %bb.0:
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm26
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [24,17,2,19,28,21,6,31,16,9,26,27,20,13,30,23]
-; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm0, %zmm2, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm26, %zmm2, %zmm24
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [8,1,18,11,4,5,22,15,0,25,10,3,12,29,14,7]
 ; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm1, %zmm2, %zmm18
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [24,17,2,27,20,5,22,31,16,9,26,19,12,29,30,23]
-; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm0, %zmm2, %zmm16
+; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm26, %zmm2, %zmm16
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,18,11,4,21,14,7,8,25,10,3,28,13,6,15]
-; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm1, %zmm2, %zmm15
+; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm1, %zmm2, %zmm25
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,11,4,5,14,7,8,9,26,19,12,29,22,15]
-; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm0, %zmm2, %zmm17
+; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm26, %zmm2, %zmm17
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,17,10,3,4,21,14,7,24,9,2,11,28,13,6,31]
 ; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm1, %zmm2, %zmm13
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,10,3,4,5,14,7,8,25,18,11,12,29,22,15]
-; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm0, %zmm2, %zmm10
+; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm26, %zmm2, %zmm10
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [16,17,10,3,20,13,6,23,24,25,18,27,28,21,30,31]
 ; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm1, %zmm2, %zmm4
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm9
@@ -10914,31 +10965,30 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[6,13,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,5,12],zero,zero,xmm4[1,8,15],zero,zero,xmm4[u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,1,2,4,6>
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm19
 ; AVX512BW-ONLY-FAST-NEXT:    vpermd %ymm19, %ymm5, %ymm5
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,23,26,29]
-; AVX512BW-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm20 = [0,1,2,15,0,1,2,15]
-; AVX512BW-ONLY-FAST-NEXT:    # ymm20 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %ymm5, %ymm20, %ymm4
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 240(%rdi), %xmm5
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[5,12,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm5[7]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 240(%rdi), %xmm6
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm6[5,12,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 224(%rdi), %xmm7
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm7[0,7,14],zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm6, %xmm12, %xmm6
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm4, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm5, %xmm12, %xmm5
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm4, %zmm4
 ; AVX512BW-ONLY-FAST-NEXT:    movabsq $137438429184, %rax # imm = 0x1FFFF80000
 ; AVX512BW-ONLY-FAST-NEXT:    kmovq %rax, %k5
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm4, %zmm2 {%k5}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 288(%rdi), %ymm6
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 288(%rdi), %ymm5
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 256(%rdi), %ymm4
 ; AVX512BW-ONLY-FAST-NEXT:    movw $9288, %ax # imm = 0x2448
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k7
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm6, %ymm4, %ymm12 {%k7}
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm12[u,u,u,u,u,3,10],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm5, %ymm4, %ymm12 {%k7}
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm12[u,u,u,u,u,3,10],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm12
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,u,u],zero,zero,xmm12[1,8,15],zero,zero,xmm12[4,11,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm21, %xmm12, %xmm21
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm20, %xmm12, %xmm21
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    movw $3968, %ax # imm = 0xF80
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
@@ -10948,180 +10998,182 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa 384(%rdi), %ymm10
 ; AVX512BW-ONLY-FAST-NEXT:    movw $4644, %ax # imm = 0x1224
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k6
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm12, %ymm10, %ymm22 {%k6}
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm22, %xmm23
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = xmm23[u,u,u,u,u,u,u],zero,zero,zero,xmm23[6,13],zero,zero,xmm23[2,9]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm22[4,11],zero,zero
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm23, %xmm22, %xmm22
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm22
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm12, %ymm10, %ymm20 {%k6}
+; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm20, %xmm22
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,u,u,u,u,u,u],zero,zero,zero,xmm22[6,13],zero,zero,xmm22[2,9]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm20[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm20[4,11],zero,zero
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm22, %xmm20, %xmm20
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm22
 ; AVX512BW-ONLY-FAST-NEXT:    movl $-8388608, %eax # imm = 0xFF800000
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm3, %ymm9, %ymm23 {%k6}
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm23, %xmm24
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm24 = zero,zero,zero,xmm24[6,13],zero,zero,xmm24[2,9,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = xmm23[1,8,15],zero,zero,xmm23[4,11],zero,zero,xmm23[u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm24, %xmm23, %xmm23
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm3, %ymm9, %ymm20 {%k6}
+; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm20, %xmm23
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = zero,zero,zero,xmm23[6,13],zero,zero,xmm23[2,9,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm20[1,8,15],zero,zero,xmm20[4,11],zero,zero,xmm20[u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm23, %xmm20, %xmm20
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    movl $511, %r10d # imm = 0x1FF
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %r10d, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm23, %ymm13 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm11, %ymm8, %ymm23 {%k7}
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm24 = xmm23[u,u,u,6,13],zero,zero,xmm23[2,9],zero,zero,zero,xmm23[u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm23, %xmm23
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = xmm23[u,u,u],zero,zero,xmm23[4,11],zero,zero,xmm23[0,7,14,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm24, %xmm23, %xmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = <u,u,u,u,1,3,4,6>
-; AVX512BW-ONLY-FAST-NEXT:    vpermd %ymm19, %ymm24, %ymm24
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm24 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,17,20,27,30]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %ymm24, %ymm20, %ymm23
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm24 = zero,zero,zero,xmm5[6,13,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm25 = xmm7[1,8,15],zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm24, %xmm25, %xmm24
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm24, %zmm23, %zmm23
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm23, %zmm13 {%k5}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm3, %ymm9, %ymm23 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm24 = xmm23[2,9],zero,zero,zero,xmm23[5,12],zero,zero,xmm23[u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm23, %xmm23
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = zero,zero,xmm23[0,7,14],zero,zero,xmm23[3,10,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm24, %xmm23, %xmm23
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm20, %ymm13 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm11, %ymm8, %ymm20 {%k7}
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = xmm20[u,u,u,6,13],zero,zero,xmm20[2,9],zero,zero,zero,xmm20[u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm20, %xmm20
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm20[u,u,u],zero,zero,xmm20[4,11],zero,zero,xmm20[0,7,14,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm23, %xmm20, %xmm20
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm14
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = <u,u,u,u,1,3,4,6>
+; AVX512BW-ONLY-FAST-NEXT:    vpermd %ymm19, %ymm20, %ymm20
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,17,20,27,30]
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5,6],ymm15[7]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm6[6,13,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm7[1,8,15],zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm15, %xmm20, %xmm15
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm15, %zmm14, %zmm14
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm14, %zmm13 {%k5}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm3, %ymm9, %ymm14 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[2,9],zero,zero,zero,xmm14[5,12],zero,zero,xmm14[u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm14
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = zero,zero,xmm14[0,7,14],zero,zero,xmm14[3,10,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm15, %xmm14, %xmm14
 ; AVX512BW-ONLY-FAST-NEXT:    movl $261632, %r10d # imm = 0x3FE00
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %r10d, %k5
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm23 {%k5} = ymm15[u,u,u,u,u,u,u,u,u,1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm14 {%k5} = ymm25[u,u,u,u,u,u,u,u,u,1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm8, %ymm11, %ymm15 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm15, %xmm24
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm24 = xmm24[u,u],zero,zero,zero,xmm24[5,12],zero,zero,xmm24[1,8,15,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm15, %xmm20
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm20[u,u],zero,zero,zero,xmm20[5,12],zero,zero,xmm20[1,8,15,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm15[u,u,0,7,14],zero,zero,xmm15[3,10],zero,zero,zero,xmm15[u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm24, %xmm15, %xmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = <u,u,u,u,1,3,5,6>
-; AVX512BW-ONLY-FAST-NEXT:    vpermd %ymm19, %ymm24, %ymm19
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm19 = ymm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,21,24,31]
-; AVX512BW-ONLY-FAST-NEXT:    vpermt2d %ymm19, %ymm20, %ymm15
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm19 = xmm7[2,9],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = zero,zero,xmm5[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm19, %xmm20, %xmm19
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm19, %zmm15, %zmm15
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %zmm23, %zmm15 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm3, %ymm9, %ymm19 {%k7}
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm19[3,10],zero,zero,zero,xmm19[6,13],zero,zero,xmm19[u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm19, %xmm19
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm19 = zero,zero,xmm19[1,8,15],zero,zero,xmm19[4,11,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm20, %xmm19, %xmm23
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm23 {%k5} = ymm18[u,u,u,u,u,u,u,u,u,2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm8, %ymm11, %ymm18 {%k6}
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm18, %xmm19
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm19 = xmm19[u,u],zero,zero,zero,xmm19[6,13],zero,zero,xmm19[2,9,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm18 = xmm18[u,u,1,8,15],zero,zero,xmm18[4,11],zero,zero,xmm18[u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm19, %xmm18, %xmm18
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm18, %ymm0, %ymm18
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm20, %xmm15, %xmm15
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = <u,u,u,u,1,3,5,6>
+; AVX512BW-ONLY-FAST-NEXT:    vpermd %ymm19, %ymm20, %ymm19
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,21,24,31]
+; AVX512BW-ONLY-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1,2,3,4,5,6],ymm0[7]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm7[2,9],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm19 = zero,zero,xmm6[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm15, %xmm19, %xmm15
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm15, %zmm0, %zmm15
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %zmm14, %zmm15 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm3, %ymm9, %ymm0 {%k7}
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm0[3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[1,8,15],zero,zero,xmm0[4,11,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm0, %xmm14, %xmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 {%k5} = ymm18[u,u,u,u,u,u,u,u,u,2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm8, %ymm11, %ymm14 {%k6}
+; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm14, %xmm18
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm18 = xmm18[u,u],zero,zero,zero,xmm18[6,13],zero,zero,xmm18[2,9,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,1,8,15],zero,zero,xmm14[4,11],zero,zero,xmm14[u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm18, %xmm14, %xmm14
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 208(%rdi), %xmm19
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm24 = xmm19[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm19[5,12]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm18 = xmm19[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm19[5,12]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 192(%rdi), %xmm20
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm25 = xmm20[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm24, %xmm25, %xmm24
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm24, %ymm0, %ymm24
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = xmm20[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm18, %xmm23, %xmm18
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm18, %ymm0, %ymm18
 ; AVX512BW-ONLY-FAST-NEXT:    movl $-134217728, %edi # imm = 0xF8000000
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %edi, %k2
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm24, %ymm18 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm24 = xmm7[3,10],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm25 = zero,zero,xmm5[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm24, %xmm25, %xmm24
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm24, %zmm18, %zmm18
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %zmm23, %zmm18 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm18, %ymm14 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm18 = xmm7[3,10],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = zero,zero,xmm6[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm18, %xmm23, %xmm18
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm18, %zmm14, %zmm18
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %zmm0, %zmm18 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k4
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm22, %ymm21 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm21
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    movabsq $-137438953472, %rax # imm = 0xFFFFFFE000000000
 ; AVX512BW-ONLY-FAST-NEXT:    kmovq %rax, %k2
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm21, %zmm2 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm4, %ymm6, %ymm21 {%k6}
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm21, %xmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,u,u,u,u],zero,zero,xmm22[2,9],zero,zero,zero,xmm22[5,12,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[u,u,u,u,u,4,11],zero,zero,xmm21[0,7,14],zero,zero,xmm21[u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm22, %xmm21, %xmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm17 = ymm17[u,u,u,u,u,u,u,u,u,u,u,u,u,u,3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm0, %zmm2 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm4, %ymm5, %ymm0 {%k6}
+; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm14
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,u],zero,zero,xmm14[2,9],zero,zero,zero,xmm14[5,12,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm0, %xmm14, %xmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm17[u,u,u,u,u,u,u,u,u,u,u,u,u,u,3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k3 # 2-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm17, %ymm21 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm14, %ymm0 {%k3}
 ; AVX512BW-ONLY-FAST-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm12, %ymm10, %ymm17 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm17[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm17[5,12],zero,zero
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm17, %xmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,u,u,u],zero,zero,xmm17[0,7,14],zero,zero,xmm17[3,10]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm22, %xmm17, %xmm17
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm17, %ymm21 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm17, %zmm13 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm4, %ymm6, %ymm17 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm17, %xmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[u,u,u,u,u],zero,zero,xmm21[3,10],zero,zero,zero,xmm21[6,13,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,u,5,12],zero,zero,xmm17[1,8,15],zero,zero,xmm17[u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm21, %xmm17, %xmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm16 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm16, %ymm17 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm12, %ymm10, %ymm16 {%k7}
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm16[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm16[6,13],zero,zero
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm16, %xmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = xmm16[u,u,u,u,u,u,u],zero,zero,xmm16[1,8,15],zero,zero,xmm16[4,11]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm21, %xmm16, %xmm16
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm16, %ymm0, %ymm16
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm16, %ymm17 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm0, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm16, %zmm15 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm4, %ymm6, %ymm16 {%k7}
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm16[u,u,u,u,u,6,13],zero,zero,xmm16[2,9],zero,zero,zero,xmm16[u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm16, %xmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = xmm16[u,u,u,u,u],zero,zero,xmm16[4,11],zero,zero,xmm16[0,7,14,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm17, %xmm16, %xmm16
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm14, %ymm16 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm12, %ymm10, %ymm14 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm14[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm14[5,12],zero,zero
+; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm14
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,u,u,u],zero,zero,xmm14[0,7,14],zero,zero,xmm14[3,10]
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm17, %xmm14, %xmm14
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm14, %ymm0 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm0, %zmm13 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm4, %ymm5, %ymm0 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm14
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,u],zero,zero,xmm14[3,10],zero,zero,zero,xmm14[6,13,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero,xmm0[u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm0, %xmm14, %xmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm14, %ymm0 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm12, %ymm10, %ymm14 {%k7}
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = xmm14[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm14[6,13],zero,zero
+; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm14
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,u,u,u],zero,zero,xmm14[1,8,15],zero,zero,xmm14[4,11]
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm16, %xmm14, %xmm14
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm14, %ymm0 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm0, %zmm15 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm4, %ymm5, %ymm0 {%k7}
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm0[u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero,xmm0[u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm0, %xmm14, %xmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm14, %ymm0 {%k3}
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm10, %ymm12, %ymm14 {%k6}
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm14, %xmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,u,u,u],zero,zero,xmm17[2,9],zero,zero,zero,xmm17[5,12]
+; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm14, %xmm16
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = xmm16[u,u,u,u,u,u,u],zero,zero,xmm16[2,9],zero,zero,zero,xmm16[5,12]
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,u,u,u,4,11],zero,zero,xmm14[0,7,14],zero,zero
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm17, %xmm14, %xmm14
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm16, %xmm14, %xmm14
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm14, %ymm16 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm14
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm14, %zmm18 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [0,9,2,3,4,13,6,7,24,17,10,11,28,21,14,31]
-; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm0, %zmm14, %zmm16
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [16,25,18,3,28,21,6,23,24,17,10,27,20,13,30,31]
-; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm0, %zmm14, %zmm17
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm14, %ymm0 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %zmm0, %zmm18 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,9,2,3,4,13,6,7,24,17,10,11,28,21,14,31]
+; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm26, %zmm0, %zmm16
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [16,25,18,3,28,21,6,23,24,17,10,27,20,13,30,31]
+; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm26, %zmm0, %zmm0
 ; AVX512BW-ONLY-FAST-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm6, %ymm4, %ymm14 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm14, %xmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[u,u,u,u],zero,zero,zero,xmm21[5,12],zero,zero,xmm21[1,8,15,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm5, %ymm4, %ymm14 {%k2}
+; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm14, %xmm17
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u],zero,zero,zero,xmm17[5,12],zero,zero,xmm17[1,8,15,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,0,7,14],zero,zero,xmm14[3,10],zero,zero,zero,xmm14[u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm21, %xmm14, %xmm14
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm17 = ymm17[u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm17, %ymm14 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm6, %ymm4, %ymm17 {%k6}
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm17, %xmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[u,u,u,u],zero,zero,zero,xmm21[6,13],zero,zero,xmm21[2,9,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,1,8,15],zero,zero,xmm17[4,11],zero,zero,xmm17[u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm21, %xmm17, %xmm17
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm17, %xmm14, %xmm14
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm0, %ymm14 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm5, %ymm4, %ymm0 {%k6}
+; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm0, %xmm17
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u],zero,zero,zero,xmm17[6,13],zero,zero,xmm17[2,9,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm17, %xmm0, %xmm0
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm16 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    movl $8176, %eax # imm = 0x1FF0
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm17, %ymm16 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm0, %ymm16 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm10, %ymm12, %ymm17 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm17, %xmm21
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[u,u,u,u,u,u,u],zero,zero,xmm21[3,10],zero,zero,zero,xmm21[6,13]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,u,u,u,5,12],zero,zero,xmm17[1,8,15],zero,zero
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm21, %xmm17, %xmm17
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm17, %ymm14 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm10, %ymm12, %ymm17 {%k7}
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm17[u,u,u,u,u,u,u,6,13],zero,zero,xmm17[2,9],zero,zero,zero
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm17, %xmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,u,u,u],zero,zero,xmm17[4,11],zero,zero,xmm17[0,7,14]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm21, %xmm17, %xmm17
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm17
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm17, %ymm16 {%k4}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm9, %ymm3, %ymm17 {%k6}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm9, %ymm3, %ymm21 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm8, %ymm11, %ymm22 {%k7}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm10, %ymm12, %ymm0 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm0, %xmm17
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,u,u,u],zero,zero,xmm17[3,10],zero,zero,zero,xmm17[6,13]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm17, %xmm0, %xmm0
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm0, %ymm14 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm10, %ymm12, %ymm0 {%k7}
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm0[u,u,u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero
+; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14]
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm17, %xmm0, %xmm0
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm0, %ymm16 {%k4}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm9, %ymm3, %ymm0 {%k6}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm9, %ymm3, %ymm17 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm8, %ymm11, %ymm21 {%k7}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm9, %ymm3 {%k7}
 ; AVX512BW-ONLY-FAST-NEXT:    vpblendmw %ymm8, %ymm11, %ymm9 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm11, %ymm8 {%k6}
@@ -11131,25 +11183,25 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm11, %xmm9, %xmm9
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm19[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm19[6,13]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = xmm20[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm11, %xmm23, %xmm11
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm20[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm11, %xmm22, %xmm11
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd {{[-0-9]+}}(%r{{[sb]}}p), %k3 # 4-byte Reload
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm11, %ymm9 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm22[u,u,3,10],zero,zero,zero,xmm22[6,13],zero,zero,xmm22[u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm22, %xmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,u],zero,zero,xmm22[1,8,15],zero,zero,xmm22[4,11,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm11, %xmm22, %xmm11
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm21[u,u,3,10],zero,zero,zero,xmm21[6,13],zero,zero,xmm21[u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm21, %xmm21
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[u,u],zero,zero,xmm21[1,8,15],zero,zero,xmm21[4,11,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm11, %xmm21, %xmm11
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm20[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = xmm19[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm19[0,7,14]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm22, %xmm23, %xmm22
-; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm22
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm22, %ymm11 {%k3}
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm8, %xmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,u],zero,zero,xmm22[2,9],zero,zero,zero,xmm22[5,12,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm20[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm19[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm19[0,7,14]
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm21, %xmm22, %xmm21
+; AVX512BW-ONLY-FAST-NEXT:    vinserti32x4 $1, %xmm21, %ymm0, %ymm21
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu8 %ymm21, %ymm11 {%k3}
+; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm8, %xmm21
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[u,u],zero,zero,xmm21[2,9],zero,zero,zero,xmm21[5,12,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,4,11],zero,zero,xmm8[0,7,14],zero,zero,xmm8[u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm22, %xmm8, %xmm8
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm21, %xmm8, %xmm8
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm20[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm19 = xmm19[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm19[1,8,15]
@@ -11160,71 +11212,73 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm1, %zmm19, %zmm19
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [16,9,2,19,12,5,22,23,24,17,26,27,20,29,30,31]
 ; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm1, %zmm20, %zmm20
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [8,1,2,19,12,5,22,15,0,9,26,11,4,29,14,7]
-; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm1, %zmm22, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm17, %xmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = zero,zero,xmm22[2,9],zero,zero,zero,xmm22[5,12,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[4,11],zero,zero,xmm17[0,7,14],zero,zero,xmm17[u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm22, %xmm17, %xmm17
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm17 {%k5} = ymm1[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [8,1,2,19,12,5,22,15,0,9,26,11,4,29,14,7]
+; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm1, %zmm21, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm0, %xmm21
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = zero,zero,xmm21[2,9],zero,zero,zero,xmm21[5,12,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm21, %xmm0, %xmm0
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 {%k5} = ymm1[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm7, %xmm22
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = xmm5[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm22 = xmm22[0],xmm23[0],xmm22[1],xmm23[1],xmm22[2],xmm23[2],xmm22[3],xmm23[3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm22, %zmm9, %zmm9
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %zmm9, %zmm17 {%k5}
-; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm21, %xmm9
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm7, %xmm21
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm6[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm21 = xmm21[0],xmm22[0],xmm21[1],xmm22[1],xmm21[2],xmm22[2],xmm21[3],xmm22[3]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm9, %zmm9
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %zmm9, %zmm0 {%k5}
+; AVX512BW-ONLY-FAST-NEXT:    vextracti32x4 $1, %ymm17, %xmm9
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,xmm9[3,10],zero,zero,zero,xmm9[6,13,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[5,12],zero,zero,xmm21[1,8,15],zero,zero,xmm21[u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm9, %xmm21, %xmm9
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[5,12],zero,zero,xmm17[1,8,15],zero,zero,xmm17[u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vporq %xmm9, %xmm17, %xmm9
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm9 {%k5} = ymm20[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm5[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm7[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm20 = xmm21[0],xmm20[0],xmm21[1],xmm20[1],xmm21[2],xmm20[2],xmm21[3],xmm20[3]
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm11, %zmm11
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm6[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm7[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm17 = xmm20[0],xmm17[0],xmm20[1],xmm17[1],xmm20[2],xmm17[2],xmm20[3],xmm17[3]
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm11, %zmm11
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %zmm11, %zmm9 {%k5}
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[6,13],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm3
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm3, %xmm11, %xmm3
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm3 {%k5} = ymm19[u,u,u,u,u,u,u,u,u,5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm7[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
 ; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %zmm1, %zmm3 {%k5}
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm12, %ymm10 {%k2}
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,u,u,u,4,u,u,7,u,25,18,11,28,21,14,u>
-; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm0, %zmm1, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,10,3,4,13,6,7,8,25,18,11,28,21,14,15]
+; AVX512BW-ONLY-FAST-NEXT:    vpermw %zmm26, %zmm1, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm6
 ; AVX512BW-ONLY-FAST-NEXT:    movw $-512, %ax # imm = 0xFE00
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm6, %ymm4 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqu16 %ymm5, %ymm4 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm4[u,u,u,u,2,9],zero,zero,zero,xmm4[5,12],zero,zero,xmm4[u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm4
 ; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u],zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm5, %xmm4, %xmm4
 ; AVX512BW-ONLY-FAST-NEXT:    movl $4186112, %edi # imm = 0x3FE000
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %edi, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 {%k1} = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,1,8,15,22,29,20,27,18,25,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 {%k1} = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,1,8,15,22,29,20,27,18,25,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm17 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm9 {%k1}
-; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[1,8,15]
-; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm10[u,u,u,u,u,u,0,7,14],zero,zero,xmm10[3,10],zero,zero,zero
-; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,35,36,37,38,39]
-; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm0, %zmm4, %zmm1
-; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm3 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa32 %zmm6, %zmm0 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm9 {%k1}
+; AVX512BW-ONLY-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm1
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u],zero,zero,zero,xmm1[5,12],zero,zero,xmm1[1,8,15]
+; AVX512BW-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm10[u,u,u,u,u,u,0,7,14],zero,zero,xmm10[3,10],zero,zero,zero
+; AVX512BW-ONLY-FAST-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX512BW-ONLY-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [16,17,18,19,20,21,22,23,24,25,26,43,44,45,46,47]
+; AVX512BW-ONLY-FAST-NEXT:    vpermi2w %zmm1, %zmm4, %zmm5
+; AVX512BW-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm1
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm3 {%k1}
 ; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rdi
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rsi)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, (%rdx)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, (%rcx)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, (%r8)
-; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, (%r9)
+; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, (%r9)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, (%rdi)
 ; AVX512BW-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, (%rax)
 ; AVX512BW-ONLY-FAST-NEXT:    vzeroupper
@@ -11232,192 +11286,196 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ;
 ; AVX512DQBW-SLOW-LABEL: load_i8_stride7_vf64:
 ; AVX512DQBW-SLOW:       # %bb.0:
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm27
-; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <8,1,18,11,4,5,22,15,u,25,10,u,12,29,14,u>
-; AVX512DQBW-SLOW-NEXT:    vpermw %zmm27, %zmm0, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <0,1,18,11,4,21,14,7,8,25,10,u,28,13,u,15>
-; AVX512DQBW-SLOW-NEXT:    vpermw %zmm27, %zmm0, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <0,17,10,3,4,21,14,7,24,9,u,11,28,13,u,31>
-; AVX512DQBW-SLOW-NEXT:    vpermw %zmm27, %zmm0, %zmm16
-; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <16,17,10,3,20,13,6,23,24,25,u,27,28,u,30,31>
-; AVX512DQBW-SLOW-NEXT:    vpermw %zmm27, %zmm0, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm28
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %zmm24
+; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <8,1,18,11,4,5,22,15,u,25,10,u,12,29,14,u>
+; AVX512DQBW-SLOW-NEXT:    vpermw %zmm24, %zmm1, %zmm18
+; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,1,18,11,4,21,14,7,8,25,10,u,28,13,u,15>
+; AVX512DQBW-SLOW-NEXT:    vpermw %zmm24, %zmm1, %zmm17
+; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,17,10,3,4,21,14,7,24,9,u,11,28,13,u,31>
+; AVX512DQBW-SLOW-NEXT:    vpermw %zmm24, %zmm1, %zmm11
+; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <16,17,10,3,20,13,6,23,24,25,u,27,28,u,30,31>
+; AVX512DQBW-SLOW-NEXT:    vpermw %zmm24, %zmm1, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa (%rdi), %ymm6
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm2
 ; AVX512DQBW-SLOW-NEXT:    movw $-28382, %ax # imm = 0x9122
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm2, %ymm28, %ymm0 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm2, %ymm6, %ymm1 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    kmovq %k1, %k2
 ; AVX512DQBW-SLOW-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm5, %xmm0, %xmm26
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[5,12],zero,zero,xmm4[1,8,15,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,7,14],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm4, %xmm1, %xmm25
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm4, %ymm26 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm3, %ymm25 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm8
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm5
 ; AVX512DQBW-SLOW-NEXT:    movw $8772, %ax # imm = 0x2244
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k7
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm6, %ymm5, %ymm4 {%k7}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm7
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,u],zero,zero,xmm7[3,10],zero,zero,zero,xmm7[6,13,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,5,12],zero,zero,xmm4[1,8,15],zero,zero,xmm4[u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm7, %xmm4, %xmm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm10
+; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k6
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm8, %ymm5, %ymm3 {%k6}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,5,12],zero,zero,xmm3[1,8,15],zero,zero,xmm3[u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm7
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm21 = <u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u>
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm21, %xmm10, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm11
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm11[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
-; AVX512DQBW-SLOW-NEXT:    vbroadcasti32x4 {{.*#+}} ymm22 = [0,1,2,11,0,1,2,11]
-; AVX512DQBW-SLOW-NEXT:    # ymm22 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %ymm4, %ymm22, %ymm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm4
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm4[5,12,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm7
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm7[0,7,14],zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm9, %xmm12, %xmm9
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm9, %zmm8, %zmm8
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm21, %xmm7, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm9
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm9[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3]
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm3[0,1,2,3,4,5,6],ymm4[7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 240(%rdi), %xmm26
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = zero,zero,zero,xmm26[5,12,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm4[0,7,14],zero,zero,xmm4[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm12, %xmm13, %xmm12
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm12, %zmm10, %zmm10
 ; AVX512DQBW-SLOW-NEXT:    movabsq $137438429184, %rax # imm = 0x1FFFF80000
 ; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm8, %zmm26 {%k5}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm9
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm8
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm10, %zmm25 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm13
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm12
 ; AVX512DQBW-SLOW-NEXT:    movw $9288, %ax # imm = 0x2448
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k4
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm9, %ymm8, %ymm12 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm12[u,u,u,u,u,3,10],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[u,u]
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm12
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,u,u],zero,zero,xmm12[1,8,15],zero,zero,xmm12[4,11,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm13, %xmm12, %xmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm13
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm14
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm13, %ymm14, %ymm12 {%k7}
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm12[2,3,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0,1],ymm15[2],ymm12[3,4,5],ymm15[6],ymm12[7,8,9],ymm15[10],ymm12[11,12,13],ymm15[14],ymm12[15]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k3
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm13, %ymm12, %ymm10 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm10[u,u,u,u,u,3,10],zero,zero,zero,xmm10[6,13],zero,zero,xmm10[u,u]
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm10
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[u,u,u,u,u],zero,zero,xmm10[1,8,15],zero,zero,xmm10[4,11,u,u]
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm14, %xmm10, %xmm19
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 352(%rdi), %ymm16
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm15
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm16, %ymm15, %ymm10 {%k6}
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm10[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm14[2],ymm10[3,4,5],ymm14[6],ymm10[7,8,9],ymm14[10],ymm10[11,12,13],ymm14[14],ymm10[15]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    movw $3968, %ax # imm = 0xF80
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k6
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm12, %ymm19 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 416(%rdi), %ymm15
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm12
+; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k7
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm10, %ymm19 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 416(%rdi), %ymm14
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm10
 ; AVX512DQBW-SLOW-NEXT:    movw $4644, %ax # imm = 0x1224
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k3
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm15, %ymm12, %ymm20 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm20, %xmm23
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm23[u,u,u,u,u,u,u],zero,zero,zero,xmm23[6,13],zero,zero,xmm23[2,9]
+; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k4
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm14, %ymm10, %ymm20 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm20, %xmm22
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,u,u,u,u,u,u],zero,zero,zero,xmm22[6,13],zero,zero,xmm22[2,9]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm20 = xmm20[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm20[4,11],zero,zero
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm23, %xmm20, %xmm20
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm22, %xmm20, %xmm20
 ; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm20
 ; AVX512DQBW-SLOW-NEXT:    movl $-8388608, %eax # imm = 0xFF800000
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm2, %ymm28, %ymm23 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm23, %xmm24
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm24 = zero,zero,zero,xmm24[6,13],zero,zero,xmm24[2,9,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm23[1,8,15],zero,zero,xmm23[4,11],zero,zero,xmm23[u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm24, %xmm23, %xmm23
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm16 = ymm16[u,u,u,u,u,u,u,u,u,0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm2, %ymm6, %ymm22 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm22, %xmm23
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = zero,zero,zero,xmm23[6,13],zero,zero,xmm23[2,9,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[1,8,15],zero,zero,xmm22[4,11],zero,zero,xmm22[u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm23, %xmm22, %xmm22
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    movl $511, %edi # imm = 0x1FF
 ; AVX512DQBW-SLOW-NEXT:    kmovd %edi, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm23, %ymm16 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm6, %ymm5, %ymm23 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm24 = xmm23[u,u,u,6,13],zero,zero,xmm23[2,9],zero,zero,zero,xmm23[u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm23, %xmm23
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm23[u,u,u],zero,zero,xmm23[4,11],zero,zero,xmm23[0,7,14,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm24, %xmm23, %xmm23
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm24 = xmm11[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm25 = xmm10[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm24 = xmm25[0],xmm24[0],xmm25[1],xmm24[1],xmm25[2],xmm24[2],xmm25[3],xmm24[3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %ymm24, %ymm22, %ymm23
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm24 = zero,zero,zero,xmm4[6,13,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm25 = xmm7[1,8,15],zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm24, %xmm25, %xmm24
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm24, %zmm23, %zmm23
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm23, %zmm16 {%k5}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm2, %ymm28, %ymm23 {%k7}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm24 = xmm23[2,9],zero,zero,zero,xmm23[5,12],zero,zero,xmm23[u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm23, %xmm23
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = zero,zero,xmm23[0,7,14],zero,zero,xmm23[3,10,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm24, %xmm23, %xmm23
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm22, %ymm11 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm8, %ymm5, %ymm22 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm22[u,u,u,6,13],zero,zero,xmm22[2,9],zero,zero,zero,xmm22[u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm22, %xmm22
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,u,u],zero,zero,xmm22[4,11],zero,zero,xmm22[0,7,14,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm23, %xmm22, %xmm22
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm0
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm9[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm7[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm22 = xmm23[0],xmm22[0],xmm23[1],xmm22[1],xmm23[2],xmm22[2],xmm23[3],xmm22[3]
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm1
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm26[6,13,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm4[1,8,15],zero,zero,xmm4[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm1, %xmm22, %xmm1
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm11 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm2, %ymm6, %ymm0 {%k6}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[2,9],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[0,7,14],zero,zero,xmm0[3,10,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX512DQBW-SLOW-NEXT:    movl $261632, %edi # imm = 0x3FE00
 ; AVX512DQBW-SLOW-NEXT:    kmovd %edi, %k5
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm23 {%k5} = ymm17[u,u,u,u,u,u,u,u,u,1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm5, %ymm6, %ymm17 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm17, %xmm24
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm24 = xmm24[u,u],zero,zero,zero,xmm24[5,12],zero,zero,xmm24[1,8,15,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,0,7,14],zero,zero,xmm17[3,10],zero,zero,zero,xmm17[u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm24, %xmm17, %xmm17
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm21, %xmm11, %xmm21
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm24 = xmm10[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm21 = xmm24[0],xmm21[0],xmm24[1],xmm21[1],xmm24[2],xmm21[2],xmm24[3],xmm21[3]
-; AVX512DQBW-SLOW-NEXT:    vpermt2d %ymm21, %ymm22, %ymm17
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm21 = xmm7[2,9],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = zero,zero,xmm4[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm21, %xmm22, %xmm21
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm21, %zmm17, %zmm17
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm23, %zmm17 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm2, %ymm28, %ymm21 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm21[3,10],zero,zero,zero,xmm21[6,13],zero,zero,xmm21[u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm21, %xmm21
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm21 = zero,zero,xmm21[1,8,15],zero,zero,xmm21[4,11,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm22, %xmm21, %xmm21
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm21 {%k5} = ymm18[u,u,u,u,u,u,u,u,u,2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm5, %ymm6, %ymm18 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm18, %xmm22
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,u],zero,zero,zero,xmm22[6,13],zero,zero,xmm22[2,9,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm18 = xmm18[u,u,1,8,15],zero,zero,xmm18[4,11],zero,zero,xmm18[u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm22, %xmm18, %xmm18
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm18, %ymm0, %ymm18
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm11[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm11[5,12]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm10[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm22, %xmm23, %xmm22
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm22
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 {%k5} = ymm17[u,u,u,u,u,u,u,u,u,1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm5, %ymm8, %ymm1 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm1, %xmm17
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u],zero,zero,zero,xmm17[5,12],zero,zero,xmm17[1,8,15,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,0,7,14],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm17, %xmm1, %xmm1
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm21, %xmm9, %xmm17
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm21 = xmm7[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm17 = xmm21[0],xmm17[0],xmm21[1],xmm17[1],xmm21[2],xmm17[2],xmm21[3],xmm17[3]
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm3
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm3[7]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm4[2,9],zero,zero,zero,xmm4[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = zero,zero,xmm26[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm3, %xmm17, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm1, %zmm17
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm0, %zmm17 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm2, %ymm6, %ymm0 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[1,8,15],zero,zero,xmm0[4,11,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 {%k5} = ymm18[u,u,u,u,u,u,u,u,u,2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm5, %ymm8, %ymm1 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,zero,xmm3[6,13],zero,zero,xmm3[2,9,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,1,8,15],zero,zero,xmm1[4,11],zero,zero,xmm1[u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm9[5,12]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm18 = xmm7[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm3, %xmm18, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
 ; AVX512DQBW-SLOW-NEXT:    movl $-134217728, %edi # imm = 0xF8000000
 ; AVX512DQBW-SLOW-NEXT:    kmovd %edi, %k2
 ; AVX512DQBW-SLOW-NEXT:    kmovd %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm22, %ymm18 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm7[3,10],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = zero,zero,xmm4[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm22, %xmm23, %xmm22
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm22, %zmm18, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm21, %zmm18 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm3, %ymm1 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm4[3,10],zero,zero,zero,xmm4[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm18 = zero,zero,xmm26[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm3, %xmm18, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm1, %zmm18
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm0, %zmm18 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k2
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm20, %ymm19 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm0, %zmm19
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm0, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    movabsq $-137438953472, %rax # imm = 0xFFFFFFE000000000
 ; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm19, %zmm26 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm8, %ymm9, %ymm19 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm19, %xmm20
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm20 = xmm20[u,u,u,u,u],zero,zero,xmm20[2,9],zero,zero,zero,xmm20[5,12,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm19[u,u,u,u,u,4,11],zero,zero,xmm19[0,7,14],zero,zero,xmm19[u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm20, %xmm19, %xmm19
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm13, %ymm14, %ymm0 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8,9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm0, %ymm19 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm15, %ymm12, %ymm0 {%k7}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm0[5,12],zero,zero
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[0,7,14],zero,zero,xmm0[3,10]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm25 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm12, %ymm13, %ymm0 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u],zero,zero,xmm1[2,9],zero,zero,zero,xmm1[5,12,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm19 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm0, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm16 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm8, %ymm9, %ymm0 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm16, %ymm15, %ymm1 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3],ymm1[4,5],ymm3[6],ymm1[7,8,9,10],ymm3[11],ymm1[12,13],ymm3[14],ymm1[15]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm14, %ymm10, %ymm1 {%k6}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm1[5,12],zero,zero
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u],zero,zero,xmm1[0,7,14],zero,zero,xmm1[3,10]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm3, %xmm1, %xmm1
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm11 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm12, %ymm13, %ymm0 {%k6}
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[6,13,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero,xmm0[u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm14, %ymm13, %ymm1 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm15, %ymm16, %ymm1 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
 ; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1,2],ymm3[3],ymm1[4,5,6],ymm3[7,8],ymm1[9,10],ymm3[11],ymm1[12,13,14],ymm3[15]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm15, %ymm12, %ymm1 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm14, %ymm10, %ymm1 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm1[6,13],zero,zero
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u],zero,zero,xmm1[1,8,15],zero,zero,xmm1[4,11]
@@ -11426,17 +11484,17 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k2}
 ; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm17 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm8, %ymm9, %ymm0 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm12, %ymm13, %ymm0 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero,xmm0[u,u]
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm14, %ymm13, %ymm1 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm15, %ymm16, %ymm1 {%k6}
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
 ; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1,2,3],ymm3[4],ymm1[5,6],ymm3[7,8],ymm1[9,10,11],ymm3[12],ymm1[13,14],ymm3[15]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm12, %ymm15, %ymm1 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm1, %ymm0 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm10, %ymm14, %ymm1 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm3
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[5,12]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u,4,11],zero,zero,xmm1[0,7,14],zero,zero
@@ -11446,144 +11504,145 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm18 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm9, %ymm8, %ymm0 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm13, %ymm12, %ymm0 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u],zero,zero,zero,xmm1[5,12],zero,zero,xmm1[1,8,15,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u]
 ; AVX512DQBW-SLOW-NEXT:    vporq %xmm1, %xmm0, %xmm19
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm14, %ymm13, %ymm0 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm15, %ymm16, %ymm0 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,3,0,1]
 ; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6,7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14,15]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm0, %ymm19 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm9, %ymm8, %ymm0 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm0, %ymm19 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm13, %ymm12, %ymm0 {%k4}
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[2,9,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm13, %ymm14, %ymm1 {%k1}
-; AVX512DQBW-SLOW-NEXT:    kmovq %k1, %k6
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm16, %ymm15, %ymm1 {%k1}
+; AVX512DQBW-SLOW-NEXT:    kmovq %k1, %k7
 ; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
 ; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3,4],ymm3[5],ymm1[6,7,8],ymm3[9],ymm1[10,11,12],ymm3[13],ymm1[14,15]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm20 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    movl $8176, %eax # imm = 0x1FF0
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm20 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm12, %ymm15, %ymm0 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm10, %ymm14, %ymm0 {%k6}
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,u],zero,zero,xmm1[3,10],zero,zero,zero,xmm1[6,13]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm19 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm12, %ymm15, %ymm0 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm10, %ymm14, %ymm0 {%k3}
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[u,u,u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14]
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm20 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm28, %ymm2, %ymm0 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm28, %ymm2, %ymm21 {%k7}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm5, %ymm6, %ymm1 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm28, %ymm2 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm5, %ymm6, %ymm3 {%k7}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm3[u,u,2,9],zero,zero,zero,xmm3[5,12],zero,zero,xmm3[u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,xmm3[0,7,14],zero,zero,xmm3[3,10,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm22, %xmm3, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm5, %ymm8, %ymm22 {%k6}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm6, %ymm2, %ymm21 {%k6}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm16, %ymm15 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm15[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm15[0,1],ymm0[2],ymm15[3,4],ymm0[5],ymm15[6,7,8,9],ymm0[10],ymm15[11,12],ymm0[13],ymm15[14,15]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm13, %ymm12 {%k6}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm12[u,u,u,u,2,9],zero,zero,zero,xmm12[5,12],zero,zero,xmm12[u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u],zero,zero,xmm3[0,7,14],zero,zero,xmm3[3,10,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm1, %xmm3, %xmm12
+; AVX512DQBW-SLOW-NEXT:    movl $4186112, %eax # imm = 0x3FE000
+; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 {%k1} = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,1,8,15,22,29,20,27,18,25,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm14, %ymm10 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm6, %ymm2, %ymm0 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm5, %ymm8, %ymm1 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm6, %ymm2 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm8, %ymm5 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm22[u,u,2,9],zero,zero,zero,xmm22[5,12],zero,zero,xmm22[u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm22, %xmm6
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[0,7,14],zero,zero,xmm6[3,10,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm3, %xmm6, %xmm3
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm11[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm11[6,13]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm10[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm22, %xmm23, %xmm22
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm22
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm9[6,13]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm6, %xmm8, %xmm6
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
 ; AVX512DQBW-SLOW-NEXT:    kmovd {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 4-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm22, %ymm3 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm1[u,u,3,10],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm6, %ymm3 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm1[u,u,3,10],zero,zero,zero,xmm1[6,13],zero,zero,xmm1[u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u],zero,zero,xmm1[1,8,15],zero,zero,xmm1[4,11,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm22, %xmm1, %xmm1
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm6, %xmm1, %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = xmm10[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm11[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm11[0,7,14]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm22, %xmm23, %xmm22
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm22
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm22, %ymm1 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm6, %ymm5 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm9[0,7,14]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm6, %xmm8, %xmm6
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm6, %ymm1 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm6
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[2,9],zero,zero,zero,xmm6[5,12,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,4,11],zero,zero,xmm5[0,7,14],zero,zero,xmm5[u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm6, %xmm5, %xmm5
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm10[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm11[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm11[1,8,15]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm6, %xmm10, %xmm6
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm9[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm9[1,8,15]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm6, %xmm7, %xmm6
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm6, %ymm5 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <16,9,2,19,20,13,6,23,24,u,26,27,28,u,30,31>
-; AVX512DQBW-SLOW-NEXT:    vpermw %zmm27, %zmm6, %zmm6
-; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = <16,9,2,19,12,5,22,23,24,u,26,27,u,29,30,31>
-; AVX512DQBW-SLOW-NEXT:    vpermw %zmm27, %zmm10, %zmm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = <8,1,2,19,12,5,22,15,u,9,26,11,u,29,14,u>
-; AVX512DQBW-SLOW-NEXT:    vpermw %zmm27, %zmm11, %zmm11
-; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm0, %xmm22
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm22 = zero,zero,xmm22[2,9],zero,zero,zero,xmm22[5,12,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpermw %zmm24, %zmm6, %zmm6
+; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <16,9,2,19,12,5,22,23,24,u,26,27,u,29,30,31>
+; AVX512DQBW-SLOW-NEXT:    vpermw %zmm24, %zmm7, %zmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = <8,1,2,19,12,5,22,15,u,9,26,11,u,29,14,u>
+; AVX512DQBW-SLOW-NEXT:    vpermw %zmm24, %zmm8, %zmm8
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm9
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,xmm9[2,9],zero,zero,zero,xmm9[5,12,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm22, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 {%k5} = ymm11[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm11, %xmm7, %xmm22
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm4[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm22 = xmm22[0],xmm23[0],xmm22[1],xmm23[1],xmm22[2],xmm23[2],xmm22[3],xmm23[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm3, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm0, %xmm9, %xmm0
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 {%k5} = ymm8[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm8, %xmm4, %xmm9
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm26[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3]
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm3, %zmm3
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm3, %zmm0 {%k5}
+; AVX512DQBW-SLOW-NEXT:    movw $-512, %ax # imm = 0xFE00
 ; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm21, %xmm3
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[3,10],zero,zero,zero,xmm3[6,13,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[5,12],zero,zero,xmm21[1,8,15],zero,zero,xmm21[u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm3, %xmm21, %xmm3
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 {%k5} = ymm10[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm4[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm21 = xmm7[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm21[0],xmm10[0],xmm21[1],xmm10[1],xmm21[2],xmm10[2],xmm21[3],xmm10[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm1, %zmm1
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm21[5,12],zero,zero,xmm21[1,8,15],zero,zero,xmm21[u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm3, %xmm9, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 {%k5} = ymm7[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm26[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm4[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3]
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm1
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm1, %zmm3 {%k5}
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm2[6,13],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[4,11],zero,zero,xmm2[0,7,14,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm1, %xmm2, %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 {%k5} = ymm6[u,u,u,u,u,u,u,u,u,5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm11, %xmm4, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm7[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm8, %xmm26, %xmm2
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
 ; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm2, %zmm1 {%k5}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm13, %ymm14 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm14[2,3,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm14[0,1],ymm2[2],ymm14[3,4],ymm2[5],ymm14[6,7,8,9],ymm2[10],ymm14[11,12],ymm2[13],ymm14[14,15]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm9, %ymm8 {%k7}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm8[u,u,u,u,2,9],zero,zero,zero,xmm8[5,12],zero,zero,xmm8[u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm5
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,xmm5[0,7,14],zero,zero,xmm5[3,10,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm5, %xmm4
-; AVX512DQBW-SLOW-NEXT:    movl $4186112, %eax # imm = 0x3FE000
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 {%k1} = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,1,8,15,22,29,20,27,18,25,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    movw $-512, %ax # imm = 0xFE00
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm15, %ymm12 {%k6}
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
 ; AVX512DQBW-SLOW-NEXT:    vinserti32x8 $1, %ymm19, %zmm0, %zmm0 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    vinserti32x8 $1, %ymm20, %zmm0, %zmm3 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm2
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm10, %xmm2
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm12[u,u,u,u,u,u,0,7,14],zero,zero,xmm12[3,10],zero,zero,zero
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm2, %xmm5, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,1,2,3,4,5,6,7,8,9,10,35,36,37,38,39]
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm2, %zmm4, %zmm5
-; AVX512DQBW-SLOW-NEXT:    vinserti32x8 $1, %ymm5, %zmm0, %zmm1 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm10[u,u,u,u,u,u,0,7,14],zero,zero,xmm10[3,10],zero,zero,zero
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm2, %xmm4, %xmm2
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm12[0,1,2],ymm2[3,4,5,6,7],ymm12[8,9,10],ymm2[11,12,13,14,15]
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm12[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vinserti32x8 $1, %ymm2, %zmm0, %zmm1 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rdi
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm26, (%rsi)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, (%rdx)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm25, (%rsi)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, (%rdx)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, (%rcx)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, (%r8)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm0, (%r9)
@@ -11594,22 +11653,22 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ;
 ; AVX512DQBW-FAST-LABEL: load_i8_stride7_vf64:
 ; AVX512DQBW-FAST:       # %bb.0:
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 320(%rdi), %zmm26
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 64(%rdi), %zmm1
 ; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [24,17,2,19,28,21,6,31,16,9,26,27,20,13,30,23]
-; AVX512DQBW-FAST-NEXT:    vpermw %zmm0, %zmm2, %zmm14
+; AVX512DQBW-FAST-NEXT:    vpermw %zmm26, %zmm2, %zmm24
 ; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [8,1,18,11,4,5,22,15,0,25,10,3,12,29,14,7]
 ; AVX512DQBW-FAST-NEXT:    vpermw %zmm1, %zmm2, %zmm18
 ; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [24,17,2,27,20,5,22,31,16,9,26,19,12,29,30,23]
-; AVX512DQBW-FAST-NEXT:    vpermw %zmm0, %zmm2, %zmm16
+; AVX512DQBW-FAST-NEXT:    vpermw %zmm26, %zmm2, %zmm16
 ; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,18,11,4,21,14,7,8,25,10,3,28,13,6,15]
-; AVX512DQBW-FAST-NEXT:    vpermw %zmm1, %zmm2, %zmm15
+; AVX512DQBW-FAST-NEXT:    vpermw %zmm1, %zmm2, %zmm25
 ; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,2,11,4,5,14,7,8,9,26,19,12,29,22,15]
-; AVX512DQBW-FAST-NEXT:    vpermw %zmm0, %zmm2, %zmm17
+; AVX512DQBW-FAST-NEXT:    vpermw %zmm26, %zmm2, %zmm17
 ; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,17,10,3,4,21,14,7,24,9,2,11,28,13,6,31]
 ; AVX512DQBW-FAST-NEXT:    vpermw %zmm1, %zmm2, %zmm13
 ; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,10,3,4,5,14,7,8,25,18,11,12,29,22,15]
-; AVX512DQBW-FAST-NEXT:    vpermw %zmm0, %zmm2, %zmm10
+; AVX512DQBW-FAST-NEXT:    vpermw %zmm26, %zmm2, %zmm10
 ; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [16,17,10,3,20,13,6,23,24,25,18,27,28,21,30,31]
 ; AVX512DQBW-FAST-NEXT:    vpermw %zmm1, %zmm2, %zmm4
 ; AVX512DQBW-FAST-NEXT:    vmovdqa (%rdi), %ymm9
@@ -11637,32 +11696,31 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm5
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[6,13,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,5,12],zero,zero,xmm4[1,8,15],zero,zero,xmm4[u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpor %xmm5, %xmm4, %xmm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,1,2,4,6>
+; AVX512DQBW-FAST-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,1,2,4,6>
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdi), %ymm19
-; AVX512DQBW-FAST-NEXT:    vpermd %ymm19, %ymm4, %ymm4
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,23,26,29]
-; AVX512DQBW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm20 = [0,1,2,15,0,1,2,15]
-; AVX512DQBW-FAST-NEXT:    # ymm20 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQBW-FAST-NEXT:    vpermt2d %ymm4, %ymm20, %ymm5
-; AVX512DQBW-FAST-NEXT:    vmovdqa 240(%rdi), %xmm4
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm4[5,12,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpermd %ymm19, %ymm5, %ymm5
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,23,26,29]
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm5[7]
+; AVX512DQBW-FAST-NEXT:    vmovdqa 240(%rdi), %xmm5
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm5[5,12,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vmovdqa 224(%rdi), %xmm7
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm7[0,7,14],zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpor %xmm6, %xmm12, %xmm6
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm5, %zmm5
+; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm4, %zmm4
 ; AVX512DQBW-FAST-NEXT:    movabsq $137438429184, %rax # imm = 0x1FFFF80000
 ; AVX512DQBW-FAST-NEXT:    kmovq %rax, %k5
-; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm5, %zmm2 {%k5}
+; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm4, %zmm2 {%k5}
 ; AVX512DQBW-FAST-NEXT:    vmovdqa 288(%rdi), %ymm6
-; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%rdi), %ymm5
+; AVX512DQBW-FAST-NEXT:    vmovdqa 256(%rdi), %ymm4
 ; AVX512DQBW-FAST-NEXT:    movw $9288, %ax # imm = 0x2448
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k6
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm6, %ymm5, %ymm12 {%k6}
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm12[u,u,u,u,u,3,10],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[u,u]
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm6, %ymm4, %ymm12 {%k6}
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm12[u,u,u,u,u,3,10],zero,zero,zero,xmm12[6,13],zero,zero,xmm12[u,u]
 ; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm12
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,u,u],zero,zero,xmm12[1,8,15],zero,zero,xmm12[4,11,u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm21, %xmm12, %xmm21
+; AVX512DQBW-FAST-NEXT:    vporq %xmm20, %xmm12, %xmm21
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    movw $3968, %ax # imm = 0xF80
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k7
@@ -11671,179 +11729,181 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-FAST-NEXT:    vmovdqa 384(%rdi), %ymm10
 ; AVX512DQBW-FAST-NEXT:    movw $4644, %ax # imm = 0x1224
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k4
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm12, %ymm10, %ymm22 {%k4}
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm22, %xmm23
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = xmm23[u,u,u,u,u,u,u],zero,zero,zero,xmm23[6,13],zero,zero,xmm23[2,9]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm22[4,11],zero,zero
-; AVX512DQBW-FAST-NEXT:    vporq %xmm23, %xmm22, %xmm22
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm22
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm12, %ymm10, %ymm20 {%k4}
+; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm20, %xmm22
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,u,u,u,u,u,u],zero,zero,zero,xmm22[6,13],zero,zero,xmm22[2,9]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm20[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm20[4,11],zero,zero
+; AVX512DQBW-FAST-NEXT:    vporq %xmm22, %xmm20, %xmm20
+; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm22
 ; AVX512DQBW-FAST-NEXT:    movl $-8388608, %eax # imm = 0xFF800000
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm3, %ymm9, %ymm23 {%k4}
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm23, %xmm24
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm24 = zero,zero,zero,xmm24[6,13],zero,zero,xmm24[2,9,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = xmm23[1,8,15],zero,zero,xmm23[4,11],zero,zero,xmm23[u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm24, %xmm23, %xmm23
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm3, %ymm9, %ymm20 {%k4}
+; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm20, %xmm23
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = zero,zero,zero,xmm23[6,13],zero,zero,xmm23[2,9,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm20[1,8,15],zero,zero,xmm20[4,11],zero,zero,xmm20[u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vporq %xmm23, %xmm20, %xmm20
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,u,u,u,u,u,0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    movl $511, %r10d # imm = 0x1FF
 ; AVX512DQBW-FAST-NEXT:    kmovd %r10d, %k1
-; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm23, %ymm13 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm11, %ymm8, %ymm23 {%k6}
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm24 = xmm23[u,u,u,6,13],zero,zero,xmm23[2,9],zero,zero,zero,xmm23[u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm23, %xmm23
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = xmm23[u,u,u],zero,zero,xmm23[4,11],zero,zero,xmm23[0,7,14,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm24, %xmm23, %xmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = <u,u,u,u,1,3,4,6>
-; AVX512DQBW-FAST-NEXT:    vpermd %ymm19, %ymm24, %ymm24
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm24 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,17,20,27,30]
-; AVX512DQBW-FAST-NEXT:    vpermt2d %ymm24, %ymm20, %ymm23
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm24 = zero,zero,zero,xmm4[6,13,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm25 = xmm7[1,8,15],zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm24, %xmm25, %xmm24
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, %xmm24, %zmm23, %zmm23
-; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm23, %zmm13 {%k5}
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm3, %ymm9, %ymm23 {%k3}
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm24 = xmm23[2,9],zero,zero,zero,xmm23[5,12],zero,zero,xmm23[u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm23, %xmm23
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = zero,zero,xmm23[0,7,14],zero,zero,xmm23[3,10,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm24, %xmm23, %xmm23
+; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm20, %ymm13 {%k1}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm11, %ymm8, %ymm20 {%k6}
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = xmm20[u,u,u,6,13],zero,zero,xmm20[2,9],zero,zero,zero,xmm20[u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm20, %xmm20
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm20[u,u,u],zero,zero,xmm20[4,11],zero,zero,xmm20[0,7,14,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vporq %xmm23, %xmm20, %xmm20
+; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, %xmm20, %ymm0, %ymm14
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = <u,u,u,u,1,3,4,6>
+; AVX512DQBW-FAST-NEXT:    vpermd %ymm19, %ymm20, %ymm20
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,17,20,27,30]
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5,6],ymm15[7]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = zero,zero,zero,xmm5[6,13,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm7[1,8,15],zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vporq %xmm15, %xmm20, %xmm15
+; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, %xmm15, %zmm14, %zmm14
+; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm14, %zmm13 {%k5}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm3, %ymm9, %ymm14 {%k3}
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[2,9],zero,zero,zero,xmm14[5,12],zero,zero,xmm14[u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm14
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = zero,zero,xmm14[0,7,14],zero,zero,xmm14[3,10,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpor %xmm15, %xmm14, %xmm14
 ; AVX512DQBW-FAST-NEXT:    movl $261632, %r10d # imm = 0x3FE00
 ; AVX512DQBW-FAST-NEXT:    kmovd %r10d, %k5
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm23 {%k5} = ymm15[u,u,u,u,u,u,u,u,u,1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm14 {%k5} = ymm25[u,u,u,u,u,u,u,u,u,1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm8, %ymm11, %ymm15 {%k2}
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm15, %xmm24
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm24 = xmm24[u,u],zero,zero,zero,xmm24[5,12],zero,zero,xmm24[1,8,15,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm15, %xmm20
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm20[u,u],zero,zero,zero,xmm20[5,12],zero,zero,xmm20[1,8,15,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm15[u,u,0,7,14],zero,zero,xmm15[3,10],zero,zero,zero,xmm15[u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm24, %xmm15, %xmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = <u,u,u,u,1,3,5,6>
-; AVX512DQBW-FAST-NEXT:    vpermd %ymm19, %ymm24, %ymm19
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm19 = ymm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,21,24,31]
-; AVX512DQBW-FAST-NEXT:    vpermt2d %ymm19, %ymm20, %ymm15
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm19 = xmm7[2,9],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = zero,zero,xmm4[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm19, %xmm20, %xmm19
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, %xmm19, %zmm15, %zmm15
-; AVX512DQBW-FAST-NEXT:    vmovdqu16 %zmm23, %zmm15 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm3, %ymm9, %ymm19 {%k6}
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm19[3,10],zero,zero,zero,xmm19[6,13],zero,zero,xmm19[u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm19, %xmm19
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm19 = zero,zero,xmm19[1,8,15],zero,zero,xmm19[4,11,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm20, %xmm19, %xmm23
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm23 {%k5} = ymm18[u,u,u,u,u,u,u,u,u,2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm8, %ymm11, %ymm18 {%k4}
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm18, %xmm19
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm19 = xmm19[u,u],zero,zero,zero,xmm19[6,13],zero,zero,xmm19[2,9,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm18 = xmm18[u,u,1,8,15],zero,zero,xmm18[4,11],zero,zero,xmm18[u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm19, %xmm18, %xmm18
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, %xmm18, %ymm0, %ymm18
+; AVX512DQBW-FAST-NEXT:    vporq %xmm20, %xmm15, %xmm15
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = <u,u,u,u,1,3,5,6>
+; AVX512DQBW-FAST-NEXT:    vpermd %ymm19, %ymm20, %ymm19
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,21,24,31]
+; AVX512DQBW-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm15[0,1,2,3,4,5,6],ymm0[7]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm7[2,9],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm19 = zero,zero,xmm5[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vporq %xmm15, %xmm19, %xmm15
+; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, %xmm15, %zmm0, %zmm15
+; AVX512DQBW-FAST-NEXT:    vmovdqu16 %zmm14, %zmm15 {%k1}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm3, %ymm9, %ymm0 {%k6}
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm0[3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[1,8,15],zero,zero,xmm0[4,11,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpor %xmm0, %xmm14, %xmm0
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 {%k5} = ymm18[u,u,u,u,u,u,u,u,u,2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm8, %ymm11, %ymm14 {%k4}
+; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm14, %xmm18
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm18 = xmm18[u,u],zero,zero,zero,xmm18[6,13],zero,zero,xmm18[2,9,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,1,8,15],zero,zero,xmm14[4,11],zero,zero,xmm14[u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vporq %xmm18, %xmm14, %xmm14
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 208(%rdi), %xmm19
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm24 = xmm19[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm19[5,12]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm18 = xmm19[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm19[5,12]
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 192(%rdi), %xmm20
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm25 = xmm20[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
-; AVX512DQBW-FAST-NEXT:    vporq %xmm24, %xmm25, %xmm24
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, %xmm24, %ymm0, %ymm24
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = xmm20[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
+; AVX512DQBW-FAST-NEXT:    vporq %xmm18, %xmm23, %xmm18
+; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, %xmm18, %ymm0, %ymm18
 ; AVX512DQBW-FAST-NEXT:    movl $-134217728, %edi # imm = 0xF8000000
 ; AVX512DQBW-FAST-NEXT:    kmovd %edi, %k2
 ; AVX512DQBW-FAST-NEXT:    kmovd %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm24, %ymm18 {%k2}
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm24 = xmm7[3,10],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm25 = zero,zero,xmm4[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm24, %xmm25, %xmm24
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, %xmm24, %zmm18, %zmm18
-; AVX512DQBW-FAST-NEXT:    vmovdqu16 %zmm23, %zmm18 {%k1}
+; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm18, %ymm14 {%k2}
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm18 = xmm7[3,10],zero,zero,zero,xmm7[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = zero,zero,xmm5[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vporq %xmm18, %xmm23, %xmm18
+; AVX512DQBW-FAST-NEXT:    vinserti32x4 $2, %xmm18, %zmm14, %zmm18
+; AVX512DQBW-FAST-NEXT:    vmovdqu16 %zmm0, %zmm18 {%k1}
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k3
 ; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm22, %ymm21 {%k3}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm21
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm0
 ; AVX512DQBW-FAST-NEXT:    movabsq $-137438953472, %rax # imm = 0xFFFFFFE000000000
 ; AVX512DQBW-FAST-NEXT:    kmovq %rax, %k2
-; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm21, %zmm2 {%k2}
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm5, %ymm6, %ymm21 {%k4}
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm21, %xmm22
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,u,u,u,u],zero,zero,xmm22[2,9],zero,zero,zero,xmm22[5,12,u,u]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[u,u,u,u,u,4,11],zero,zero,xmm21[0,7,14],zero,zero,xmm21[u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm22, %xmm21, %xmm21
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm17 = ymm17[u,u,u,u,u,u,u,u,u,u,u,u,u,u,3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vmovdqu16 %ymm17, %ymm21 {%k7}
+; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm2 {%k2}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm4, %ymm6, %ymm0 {%k4}
+; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm14
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,u],zero,zero,xmm14[2,9],zero,zero,zero,xmm14[5,12,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u]
+; AVX512DQBW-FAST-NEXT:    vpor %xmm0, %xmm14, %xmm0
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm17[u,u,u,u,u,u,u,u,u,u,u,u,u,u,3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vmovdqu16 %ymm14, %ymm0 {%k7}
 ; AVX512DQBW-FAST-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm12, %ymm10, %ymm17 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm17[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm17[5,12],zero,zero
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm17, %xmm17
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,u,u,u],zero,zero,xmm17[0,7,14],zero,zero,xmm17[3,10]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm22, %xmm17, %xmm17
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm17
-; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm17, %ymm21 {%k3}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm17
-; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm17, %zmm13 {%k2}
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm5, %ymm6, %ymm17 {%k1}
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm17, %xmm21
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[u,u,u,u,u],zero,zero,xmm21[3,10],zero,zero,zero,xmm21[6,13,u,u]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,u,5,12],zero,zero,xmm17[1,8,15],zero,zero,xmm17[u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm21, %xmm17, %xmm17
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm16 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vmovdqu16 %ymm16, %ymm17 {%k7}
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm12, %ymm10, %ymm16 {%k6}
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm16[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm16[6,13],zero,zero
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm16, %xmm16
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = xmm16[u,u,u,u,u,u,u],zero,zero,xmm16[1,8,15],zero,zero,xmm16[4,11]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm21, %xmm16, %xmm16
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, %xmm16, %ymm0, %ymm16
-; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm16, %ymm17 {%k3}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm0, %zmm16
-; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm16, %zmm15 {%k2}
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm5, %ymm6, %ymm16 {%k6}
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm16[u,u,u,u,u,6,13],zero,zero,xmm16[2,9],zero,zero,zero,xmm16[u,u]
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm16, %xmm16
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = xmm16[u,u,u,u,u],zero,zero,xmm16[4,11],zero,zero,xmm16[0,7,14,u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm17, %xmm16, %xmm16
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vmovdqu16 %ymm14, %ymm16 {%k7}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm12, %ymm10, %ymm14 {%k1}
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm14[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm14[5,12],zero,zero
+; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm14
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,u,u,u],zero,zero,xmm14[0,7,14],zero,zero,xmm14[3,10]
+; AVX512DQBW-FAST-NEXT:    vporq %xmm17, %xmm14, %xmm14
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm14, %ymm0 {%k3}
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm13 {%k2}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm4, %ymm6, %ymm0 {%k1}
+; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm14
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,u],zero,zero,xmm14[3,10],zero,zero,zero,xmm14[6,13,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero,xmm0[u,u]
+; AVX512DQBW-FAST-NEXT:    vpor %xmm0, %xmm14, %xmm0
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vmovdqu16 %ymm14, %ymm0 {%k7}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm12, %ymm10, %ymm14 {%k6}
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = xmm14[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm14[6,13],zero,zero
+; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm14, %xmm14
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,u,u,u],zero,zero,xmm14[1,8,15],zero,zero,xmm14[4,11]
+; AVX512DQBW-FAST-NEXT:    vporq %xmm16, %xmm14, %xmm14
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm14, %ymm0 {%k3}
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm15 {%k2}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm4, %ymm6, %ymm0 {%k6}
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm0[u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero,xmm0[u,u]
+; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14,u,u]
+; AVX512DQBW-FAST-NEXT:    vpor %xmm0, %xmm14, %xmm0
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vmovdqu16 %ymm14, %ymm0 {%k7}
 ; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm10, %ymm12, %ymm14 {%k4}
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm14, %xmm17
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,u,u,u],zero,zero,xmm17[2,9],zero,zero,zero,xmm17[5,12]
+; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm14, %xmm16
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = xmm16[u,u,u,u,u,u,u],zero,zero,xmm16[2,9],zero,zero,zero,xmm16[5,12]
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,u,u,u,4,11],zero,zero,xmm14[0,7,14],zero,zero
-; AVX512DQBW-FAST-NEXT:    vporq %xmm17, %xmm14, %xmm14
+; AVX512DQBW-FAST-NEXT:    vporq %xmm16, %xmm14, %xmm14
 ; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm14, %ymm16 {%k3}
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm0, %zmm14
-; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm14, %zmm18 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [0,9,2,3,4,13,6,7,24,17,10,11,28,21,14,31]
-; AVX512DQBW-FAST-NEXT:    vpermw %zmm0, %zmm14, %zmm16
+; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm14, %ymm0 {%k3}
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm18 {%k2}
+; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,9,2,3,4,13,6,7,24,17,10,11,28,21,14,31]
+; AVX512DQBW-FAST-NEXT:    vpermw %zmm26, %zmm0, %zmm0
 ; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [16,25,18,3,28,21,6,23,24,17,10,27,20,13,30,31]
-; AVX512DQBW-FAST-NEXT:    vpermw %zmm0, %zmm14, %zmm17
+; AVX512DQBW-FAST-NEXT:    vpermw %zmm26, %zmm14, %zmm16
 ; AVX512DQBW-FAST-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 2-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm6, %ymm5, %ymm14 {%k2}
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm14, %xmm21
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[u,u,u,u],zero,zero,zero,xmm21[5,12],zero,zero,xmm21[1,8,15,u,u]
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm6, %ymm4, %ymm14 {%k2}
+; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm14, %xmm17
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u],zero,zero,zero,xmm17[5,12],zero,zero,xmm17[1,8,15,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,u,u,0,7,14],zero,zero,xmm14[3,10],zero,zero,zero,xmm14[u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm21, %xmm14, %xmm14
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm17 = ymm17[u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vmovdqu16 %ymm17, %ymm14 {%k7}
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm6, %ymm5, %ymm17 {%k4}
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm17, %xmm21
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[u,u,u,u],zero,zero,zero,xmm21[6,13],zero,zero,xmm21[2,9,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,1,8,15],zero,zero,xmm17[4,11],zero,zero,xmm17[u,u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm21, %xmm17, %xmm17
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm16 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vporq %xmm17, %xmm14, %xmm14
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm16 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vmovdqu16 %ymm16, %ymm14 {%k7}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm6, %ymm4, %ymm16 {%k4}
+; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm16, %xmm17
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u],zero,zero,zero,xmm17[6,13],zero,zero,xmm17[2,9,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm16 = xmm16[u,u,u,u,1,8,15],zero,zero,xmm16[4,11],zero,zero,xmm16[u,u,u]
+; AVX512DQBW-FAST-NEXT:    vporq %xmm17, %xmm16, %xmm17
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm16 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    movl $8176, %eax # imm = 0x1FF0
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
 ; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm17, %ymm16 {%k1}
 ; AVX512DQBW-FAST-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm10, %ymm12, %ymm17 {%k1}
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm17, %xmm21
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[u,u,u,u,u,u,u],zero,zero,xmm21[3,10],zero,zero,zero,xmm21[6,13]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,u,u,u,5,12],zero,zero,xmm17[1,8,15],zero,zero
-; AVX512DQBW-FAST-NEXT:    vporq %xmm21, %xmm17, %xmm17
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm17
-; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm17, %ymm14 {%k3}
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm10, %ymm12, %ymm17 {%k6}
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm17[u,u,u,u,u,u,u,6,13],zero,zero,xmm17[2,9],zero,zero,zero
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm17, %xmm17
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,u,u,u],zero,zero,xmm17[4,11],zero,zero,xmm17[0,7,14]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm21, %xmm17, %xmm17
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm17
-; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm17, %ymm16 {%k3}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm10, %ymm12, %ymm0 {%k1}
+; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm0, %xmm17
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,u,u,u],zero,zero,xmm17[3,10],zero,zero,zero,xmm17[6,13]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero
+; AVX512DQBW-FAST-NEXT:    vporq %xmm17, %xmm0, %xmm0
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm0, %ymm14 {%k3}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm10, %ymm12, %ymm0 {%k6}
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm0[u,u,u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero
+; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14]
+; AVX512DQBW-FAST-NEXT:    vporq %xmm17, %xmm0, %xmm0
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm0, %ymm16 {%k3}
 ; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm9, %ymm3, %ymm21 {%k4}
 ; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm9, %ymm3, %ymm17 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm8, %ymm11, %ymm22 {%k6}
+; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm8, %ymm11, %ymm0 {%k6}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu16 %ymm9, %ymm3 {%k6}
 ; AVX512DQBW-FAST-NEXT:    vpblendmw %ymm8, %ymm11, %ymm9 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu16 %ymm11, %ymm8 {%k4}
@@ -11853,99 +11913,101 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512DQBW-FAST-NEXT:    vpor %xmm11, %xmm9, %xmm9
 ; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm19[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm19[6,13]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = xmm20[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
-; AVX512DQBW-FAST-NEXT:    vporq %xmm11, %xmm23, %xmm11
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm20[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
+; AVX512DQBW-FAST-NEXT:    vporq %xmm11, %xmm22, %xmm11
 ; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
 ; AVX512DQBW-FAST-NEXT:    kmovd {{[-0-9]+}}(%r{{[sb]}}p), %k3 # 4-byte Reload
 ; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm11, %ymm9 {%k3}
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm22[u,u,3,10],zero,zero,zero,xmm22[6,13],zero,zero,xmm22[u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm22, %xmm22
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,u],zero,zero,xmm22[1,8,15],zero,zero,xmm22[4,11,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm0[u,u,3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u],zero,zero,xmm0[1,8,15],zero,zero,xmm0[4,11,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpor %xmm0, %xmm11, %xmm0
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm20[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm19[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm19[0,7,14]
 ; AVX512DQBW-FAST-NEXT:    vporq %xmm11, %xmm22, %xmm11
 ; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm20[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = xmm19[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm19[0,7,14]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm22, %xmm23, %xmm22
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, %xmm22, %ymm0, %ymm22
-; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm22, %ymm11 {%k3}
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm8, %xmm22
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm22[u,u],zero,zero,xmm22[2,9],zero,zero,zero,xmm22[5,12,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm11, %ymm0 {%k3}
+; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm8, %xmm11
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u],zero,zero,xmm11[2,9],zero,zero,zero,xmm11[5,12,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,4,11],zero,zero,xmm8[0,7,14],zero,zero,xmm8[u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm22, %xmm8, %xmm8
+; AVX512DQBW-FAST-NEXT:    vpor %xmm11, %xmm8, %xmm8
 ; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm20[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm20[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm19 = xmm19[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm19[1,8,15]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm20, %xmm19, %xmm19
-; AVX512DQBW-FAST-NEXT:    vinserti32x4 $1, %xmm19, %ymm0, %ymm19
-; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm19, %ymm8 {%k3}
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [16,9,2,19,20,13,6,23,24,17,26,27,28,21,30,31]
+; AVX512DQBW-FAST-NEXT:    vporq %xmm11, %xmm19, %xmm11
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512DQBW-FAST-NEXT:    vmovdqu8 %ymm11, %ymm8 {%k3}
+; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [16,9,2,19,20,13,6,23,24,17,26,27,28,21,30,31]
+; AVX512DQBW-FAST-NEXT:    vpermw %zmm1, %zmm11, %zmm11
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [16,9,2,19,12,5,22,23,24,17,26,27,20,29,30,31]
 ; AVX512DQBW-FAST-NEXT:    vpermw %zmm1, %zmm19, %zmm19
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [16,9,2,19,12,5,22,23,24,17,26,27,20,29,30,31]
-; AVX512DQBW-FAST-NEXT:    vpermw %zmm1, %zmm20, %zmm20
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [8,1,2,19,12,5,22,15,0,9,26,11,4,29,14,7]
-; AVX512DQBW-FAST-NEXT:    vpermw %zmm1, %zmm22, %zmm1
-; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm21, %xmm22
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = zero,zero,xmm22[2,9],zero,zero,zero,xmm22[5,12,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [8,1,2,19,12,5,22,15,0,9,26,11,4,29,14,7]
+; AVX512DQBW-FAST-NEXT:    vpermw %zmm1, %zmm20, %zmm1
+; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm21, %xmm20
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = zero,zero,xmm20[2,9],zero,zero,zero,xmm20[5,12,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm21 = xmm21[4,11],zero,zero,xmm21[0,7,14],zero,zero,xmm21[u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vporq %xmm22, %xmm21, %xmm21
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm21 {%k5} = ymm1[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vporq %xmm20, %xmm21, %xmm20
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm20 {%k5} = ymm1[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512DQBW-FAST-NEXT:    vpshufb %xmm1, %xmm7, %xmm22
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm23 = xmm4[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm22 = xmm22[0],xmm23[0],xmm22[1],xmm23[1],xmm22[2],xmm23[2],xmm22[3],xmm23[3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm22, %zmm9, %zmm9
-; AVX512DQBW-FAST-NEXT:    vmovdqu16 %zmm9, %zmm21 {%k5}
+; AVX512DQBW-FAST-NEXT:    vpshufb %xmm1, %xmm7, %xmm21
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm22 = xmm5[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm21 = xmm21[0],xmm22[0],xmm21[1],xmm22[1],xmm21[2],xmm22[2],xmm21[3],xmm22[3]
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm9, %zmm9
+; AVX512DQBW-FAST-NEXT:    vmovdqu16 %zmm9, %zmm20 {%k5}
 ; AVX512DQBW-FAST-NEXT:    vextracti32x4 $1, %ymm17, %xmm9
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,xmm9[3,10],zero,zero,zero,xmm9[6,13,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[5,12],zero,zero,xmm17[1,8,15],zero,zero,xmm17[u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vporq %xmm9, %xmm17, %xmm9
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm9 {%k5} = ymm20[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm4[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm20 = xmm7[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm17 = xmm20[0],xmm17[0],xmm20[1],xmm17[1],xmm20[2],xmm17[2],xmm20[3],xmm17[3]
-; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm11, %zmm11
-; AVX512DQBW-FAST-NEXT:    vmovdqu16 %zmm11, %zmm9 {%k5}
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[6,13],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm9 {%k5} = ymm19[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm17 = xmm5[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm19 = xmm7[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm17 = xmm19[0],xmm17[0],xmm19[1],xmm17[1],xmm19[2],xmm17[2],xmm19[3],xmm17[3]
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm0, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqu16 %zmm0, %zmm9 {%k5}
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm3[6,13],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm3, %xmm3
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpor %xmm3, %xmm11, %xmm3
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm3 {%k5} = ymm19[u,u,u,u,u,u,u,u,u,5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm7[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; AVX512DQBW-FAST-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 {%k5} = ymm11[u,u,u,u,u,u,u,u,u,5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm7[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
 ; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
-; AVX512DQBW-FAST-NEXT:    vmovdqu16 %zmm1, %zmm3 {%k5}
+; AVX512DQBW-FAST-NEXT:    vmovdqu16 %zmm1, %zmm0 {%k5}
 ; AVX512DQBW-FAST-NEXT:    vmovdqu16 %ymm12, %ymm10 {%k2}
-; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,u,u,u,4,u,u,7,u,25,18,11,28,21,14,u>
-; AVX512DQBW-FAST-NEXT:    vpermw %zmm0, %zmm1, %zmm0
+; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,10,3,4,13,6,7,8,25,18,11,28,21,14,15]
+; AVX512DQBW-FAST-NEXT:    vpermw %zmm26, %zmm1, %zmm1
 ; AVX512DQBW-FAST-NEXT:    movw $-512, %ax # imm = 0xFE00
-; AVX512DQBW-FAST-NEXT:    vmovdqu16 %ymm6, %ymm5 {%k1}
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm5[u,u,u,u,2,9],zero,zero,zero,xmm5[5,12],zero,zero,xmm5[u,u,u]
-; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm5, %xmm4
+; AVX512DQBW-FAST-NEXT:    vmovdqu16 %ymm6, %ymm4 {%k1}
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm4[u,u,u,u,2,9],zero,zero,zero,xmm4[5,12],zero,zero,xmm4[u,u,u]
+; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm4, %xmm4
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u],zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10,u,u,u]
-; AVX512DQBW-FAST-NEXT:    vpor %xmm1, %xmm4, %xmm1
+; AVX512DQBW-FAST-NEXT:    vpor %xmm3, %xmm4, %xmm3
 ; AVX512DQBW-FAST-NEXT:    movl $4186112, %edi # imm = 0x3FE000
 ; AVX512DQBW-FAST-NEXT:    kmovd %edi, %k1
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm1 {%k1} = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,1,8,15,22,29,20,27,18,25,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} ymm3 {%k1} = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,1,8,15,22,29,20,27,18,25,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-FAST-NEXT:    vinserti32x8 $1, %ymm14, %zmm0, %zmm21 {%k1}
+; AVX512DQBW-FAST-NEXT:    vinserti32x8 $1, %ymm14, %zmm0, %zmm20 {%k1}
 ; AVX512DQBW-FAST-NEXT:    vinserti32x8 $1, %ymm16, %zmm0, %zmm9 {%k1}
-; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm0
-; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u],zero,zero,zero,xmm0[5,12],zero,zero,xmm0[1,8,15]
+; AVX512DQBW-FAST-NEXT:    vextracti128 $1, %ymm10, %xmm1
+; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u],zero,zero,zero,xmm1[5,12],zero,zero,xmm1[1,8,15]
 ; AVX512DQBW-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm10[u,u,u,u,u,u,0,7,14],zero,zero,xmm10[3,10],zero,zero,zero
-; AVX512DQBW-FAST-NEXT:    vpor %xmm0, %xmm4, %xmm0
-; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,8,9,10,35,36,37,38,39]
-; AVX512DQBW-FAST-NEXT:    vpermi2w %zmm0, %zmm1, %zmm4
-; AVX512DQBW-FAST-NEXT:    vinserti32x8 $1, %ymm4, %zmm0, %zmm3 {%k1}
+; AVX512DQBW-FAST-NEXT:    vpor %xmm1, %xmm4, %xmm1
+; AVX512DQBW-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQBW-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
+; AVX512DQBW-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [16,17,18,19,20,21,22,23,24,25,26,43,44,45,46,47]
+; AVX512DQBW-FAST-NEXT:    vpermi2w %zmm1, %zmm3, %zmm4
+; AVX512DQBW-FAST-NEXT:    vinserti32x8 $1, %ymm4, %zmm0, %zmm0 {%k1}
 ; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQBW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rdi
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm2, (%rsi)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm13, (%rdx)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm15, (%rcx)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm18, (%r8)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm21, (%r9)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm20, (%r9)
 ; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm9, (%rdi)
-; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm3, (%rax)
+; AVX512DQBW-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512DQBW-FAST-NEXT:    vzeroupper
 ; AVX512DQBW-FAST-NEXT:    retq
   %wide.vec = load <448 x i8>, ptr %in.vec, align 64

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-8.ll
index 971b61dc3bcdb..9cbb3fea50c7d 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-8.ll
@@ -4295,263 +4295,309 @@ define void @load_i8_stride8_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm0
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm0, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,0,8,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm2, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,0,8,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm2, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, %xmm12
 ; AVX512F-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm2, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm2, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, %xmm6
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,0,8,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm2, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, %xmm10
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,0,8,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm2, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, %xmm7
 ; AVX512F-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm2, %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, %xmm9
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm18 = [47244640258,47244640258,47244640258,47244640258]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm18, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm1
-; AVX512F-SLOW-NEXT:    vpmovqb %ymm1, %xmm6
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm17 = [38654705664,38654705664,38654705664,38654705664]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm0, %ymm17, %ymm6
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm3[6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm2, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, %xmm15
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5,6],ymm1[7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm3
+; AVX512F-SLOW-NEXT:    vpmovqb %ymm3, %xmm3
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5],ymm3[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm1, %xmm11
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm19
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm1, %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm1, %xmm6
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm1, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm18
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm9
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm9, %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm1, %xmm13
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm1, %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm11
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm11, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm13[0],xmm8[1],xmm13[1],xmm8[2],xmm13[2],xmm8[3],xmm13[3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm8 = xmm8[0,1,2],xmm12[3]
-; AVX512F-SLOW-NEXT:    vpmovqb %zmm16, %xmm12
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm8 = xmm12[0,1],xmm8[2,3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm20
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm1, %xmm11
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm19
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm1
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm1, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm20
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm10[3]
+; AVX512F-SLOW-NEXT:    vpmovqb %zmm16, %xmm10
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm10[0,1],xmm5[2,3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm17
 ; AVX512F-SLOW-NEXT:    vmovdqa 160(%rdi), %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa 176(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,u,1,9,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm4, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm5, %xmm12
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm5, %xmm14
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm14[0],xmm8[0],xmm14[1],xmm8[1],xmm14[2],xmm8[2],xmm14[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,1,9,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm14
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm10, %xmm24
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm15
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm8, %ymm18, %ymm10
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,1,9,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm1, %xmm14
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm1, %xmm13
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm0, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm23
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3]
-; AVX512F-SLOW-NEXT:    vmovdqa 128(%rdi), %xmm14
-; AVX512F-SLOW-NEXT:    vmovdqa 144(%rdi), %xmm15
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,1,9,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm12, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm12, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm6, %xmm12
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, %xmm8
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm12[0],xmm5[0],xmm12[1],xmm5[1],xmm12[2],xmm5[2],xmm12[3],xmm5[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,u,1,9,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm7, %xmm12
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm15, %xmm13
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm15, %xmm10
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm12, %ymm0, %ymm12
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm12[0,1,2,3,4,5,6],ymm5[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,1,9,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm1, %xmm13
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm24
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm0, %xmm12
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm21
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3]
+; AVX512F-SLOW-NEXT:    vmovdqa 128(%rdi), %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa 144(%rdi), %xmm6
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm15, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm14, %xmm1
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm6, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, %xmm13
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm3, %xmm12
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm8, %ymm17, %ymm0
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm10[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm5, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm6, %xmm3
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm11, %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm1
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm3, %xmm1
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm6
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm6, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm11
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm11, %xmm5
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3]
 ; AVX512F-SLOW-NEXT:    vpsrlq $8, %zmm16, %zmm2
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm19
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm18
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,2,10,u,u,u,u,u,u,u,u>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm12, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm12, %xmm22
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm2
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,2,10,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm18, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,2,10,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm13, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm13, %xmm25
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm13
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm13, %xmm1
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm15, %xmm10
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm14, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm17, %ymm8
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3,4,5],ymm3[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm23
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm14
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm10, %xmm23
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm14[0],xmm5[0],xmm14[1],xmm5[1],xmm14[2],xmm5[2],xmm14[3],xmm5[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5,6],ymm1[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,2,10,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm10
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm10, %xmm14
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm10
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm10, %xmm5
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm13, %xmm15
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm12, %xmm14
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm14[0,1,2,3,4],ymm5[5],ymm14[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm1[6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm20
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm9, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm9, %xmm0
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm6, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm22
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm11, %xmm6
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm11, %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
 ; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm16, %zmm2
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm21
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm19
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,3,11,u,u,u,u,u,u,u,u>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm4, %xmm12
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm21
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm8, %xmm25
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,3,11,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm18, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,3,11,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm13, %xmm1
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm15, %xmm10
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm14, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm17, %ymm8
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3,4,5],ymm3[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm28
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm11, %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm9
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm14
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm14[0],xmm5[0],xmm14[1],xmm5[1],xmm14[2],xmm5[2],xmm14[3],xmm5[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5,6],ymm1[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,3,11,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm11
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm11, %xmm14
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm10, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm10, %xmm26
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm13, %xmm15
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm12, %xmm14
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm14[0,1,2,3,4],ymm5[5],ymm14[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5],ymm1[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm3, %xmm15
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm10
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, %xmm8
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
 ; AVX512F-SLOW-NEXT:    vpsrlq $24, %zmm16, %zmm2
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm22
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,4,12,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm12, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm12, %xmm24
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm27
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm20
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,u,u,4,12,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm6
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm6, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm0, %xmm2
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,4,12,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm5, %xmm6
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm18, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,4,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm13, %xmm1
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm15, %xmm10
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm14, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm17, %ymm8
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3,4,5],ymm3[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm12
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm12, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm7, %xmm25
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm11, %xmm2
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm14
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm14[0],xmm5[0],xmm14[1],xmm5[1],xmm14[2],xmm5[2],xmm14[3],xmm5[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5,6],ymm1[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,4,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm11, %xmm14
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm0, %xmm5
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm13, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm13, %xmm28
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm12, %xmm14
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm12, %xmm24
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm5[5],ymm0[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm4, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm22
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm15, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm15, %xmm23
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm8, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm8, %xmm14
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3]
 ; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm16, %zmm2
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,5,13,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm2
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm2
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm21
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,u,5,13,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm6, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, %xmm13
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm4, %xmm2
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,5,13,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm6, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm26
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm18, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,5,13,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm4, %xmm9
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm13, %xmm1
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm15, %xmm10
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm14, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm17, %ymm8
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0,1,2,3,4,5],ymm3[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm12, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm8, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm11, %xmm2
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm7, %xmm25
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm9, %xmm15
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5,6],ymm1[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,5,13,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm11, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm11, %xmm27
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm6
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm6, %xmm3
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm11
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm11, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm8, %xmm5
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5],ymm0[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm5, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm7
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm7, %xmm3
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm10, %xmm23
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm14, %xmm2
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3]
 ; AVX512F-SLOW-NEXT:    vpsrlq $40, %zmm16, %zmm2
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm25
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,6,14,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm5, %xmm2
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm22
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,6,14,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm13, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm13, %xmm12
+; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm4, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm4, %xmm13
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,6,14,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm7, %xmm27
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm10
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm10[0],xmm3[0],xmm10[1],xmm3[1],xmm10[2],xmm3[2],xmm10[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm18, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,6,14,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm9, %xmm10
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm13, %xmm1
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm15, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm14, %xmm10
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm17, %ymm0
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm12, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm10
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm4, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm10
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm15, %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm15, %xmm25
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5,6],ymm1[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,6,14,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm0, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm6, %xmm3
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm11, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm15
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm8, %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm11
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm3[5],ymm0[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm5, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm24
+; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm7, %xmm3
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm8
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm8, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm11, %xmm2
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm14, %xmm2
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3]
 ; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm16, %zmm2
@@ -4559,44 +4605,49 @@ define void @load_i8_stride8_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,u,u,7,15,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm2
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm5, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm12, %xmm2
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm13, %xmm3
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,7,15,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm7, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm10, %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm5, %xmm5
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm2, %ymm18, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,7,15,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm13, %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5,6],ymm2[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,7,15,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm5, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm6, %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm15, %xmm9
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm14, %xmm5
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm2, %ymm17, %ymm5
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3,4,5],ymm4[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm12, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm10, %xmm1
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm15, %xmm6
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm11, %xmm5
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5],ymm5[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm2[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm7, %xmm1
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm8, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm11, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm14, %xmm3
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
 ; AVX512F-SLOW-NEXT:    vpsrlq $56, %zmm16, %zmm3
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm3, %xmm3
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, (%rsi)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm19, (%rdx)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm21, (%rcx)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm22, (%r8)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm23, (%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm17, (%rsi)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm18, (%rdx)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm19, (%rcx)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, (%r8)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm21, (%r9)
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm25, (%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm22, (%rax)
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, (%rax)
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
@@ -4844,264 +4895,294 @@ define void @load_i8_stride8_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm0
 ; AVX512BW-SLOW-NEXT:    vpmovqb %zmm0, %xmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm2
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,0,8,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm2, %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm4
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm4, %xmm3
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,0,8,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm2, %xmm1
+; AVX512BW-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm6
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm6, %xmm3
 ; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,0,8,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm5, %xmm3
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm7
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,0,8,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm7, %xmm3
 ; AVX512BW-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm9
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm9, %xmm6
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
-; AVX512BW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm10 = [47244640258,47244640258,47244640258,47244640258]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm1, %ymm10, %ymm3
-; AVX512BW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm1
-; AVX512BW-SLOW-NEXT:    vpmovqb %ymm1, %xmm6
-; AVX512BW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [38654705664,38654705664,38654705664,38654705664]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm1, %ymm6
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm3[6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm3
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm3, %xmm11
-; AVX512BW-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm6, %xmm7
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm7
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm7, %xmm13
-; AVX512BW-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm11
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm11, %xmm8
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm13[0],xmm8[1],xmm13[1],xmm8[2],xmm13[2],xmm8[3],xmm13[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm8 = xmm8[0,1,2],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm16, %xmm12
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm8 = xmm12[0,1],xmm8[2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512BW-SLOW-NEXT:    vmovdqa64 128(%rdi), %xmm20
-; AVX512BW-SLOW-NEXT:    vmovdqa64 144(%rdi), %xmm21
-; AVX512BW-SLOW-NEXT:    vmovdqa64 160(%rdi), %xmm22
-; AVX512BW-SLOW-NEXT:    vmovdqa 176(%rdi), %xmm15
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm9, %xmm8
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5,6],ymm1[7]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512BW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm3
+; AVX512BW-SLOW-NEXT:    vpmovqb %ymm3, %xmm3
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3,4],ymm0[5],ymm3[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm1
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm1, %xmm8
+; AVX512BW-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm3
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm3, %xmm4
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm4
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm4, %xmm11
+; AVX512BW-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm8
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm8, %xmm5
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1,2],xmm10[3]
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm16, %xmm10
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm10[0,1],xmm5[2,3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm20
+; AVX512BW-SLOW-NEXT:    vmovdqa64 128(%rdi), %xmm19
+; AVX512BW-SLOW-NEXT:    vmovdqa 144(%rdi), %xmm11
+; AVX512BW-SLOW-NEXT:    vmovdqa 160(%rdi), %xmm12
+; AVX512BW-SLOW-NEXT:    vmovdqa 176(%rdi), %xmm13
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,1,9,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm8
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm17
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm17[0],xmm8[0],xmm17[1],xmm8[1],xmm17[2],xmm8[2],xmm17[3],xmm8[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,1,9,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm5, %xmm18
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm9, %xmm19
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm19[0],xmm18[0],xmm19[1],xmm18[1],xmm19[2],xmm18[2],xmm19[3],xmm18[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm8, %ymm10, %ymm12
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,1,9,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm15, %xmm18
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm22, %xmm8
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm18[0],xmm8[1],xmm18[1],xmm8[2],xmm18[2],xmm8[3],xmm18[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm18, %xmm21, %xmm19
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm18, %xmm20, %xmm18
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm18[0],xmm19[0],xmm18[1],xmm19[1],xmm18[2],xmm19[2],xmm18[3],xmm19[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm8, %ymm1, %ymm13
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm13[0,1,2,3,4,5],ymm12[6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm12
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm7, %xmm12
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm11, %xmm13
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm12[0,1,2],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $8, %zmm16, %zmm12
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm12[0,1],xmm0[2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm19
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm14
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm14[0],xmm5[0],xmm14[1],xmm5[1],xmm14[2],xmm5[2],xmm14[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,u,1,9,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm7, %xmm15
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm9, %xmm17
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm17[0],xmm15[0],xmm17[1],xmm15[1],xmm17[2],xmm15[2],xmm17[3],xmm15[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm15[0,1,2,3,4,5,6],ymm5[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,1,9,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm13, %xmm17
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm12, %xmm15
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm15[0],xmm17[0],xmm15[1],xmm17[1],xmm15[2],xmm17[2],xmm15[3],xmm17[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm11, %xmm18
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm19, %xmm17
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm17 = xmm17[0],xmm18[0],xmm17[1],xmm18[1],xmm17[2],xmm18[2],xmm17[3],xmm18[3]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm10
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3,4],ymm15[5],ymm10[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm10[0,1,2,3,4,5],ymm5[6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm10
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm4, %xmm10
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm8, %xmm14
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm14[0],xmm10[0],xmm14[1],xmm10[1],xmm14[2],xmm10[2],xmm14[3],xmm10[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm10[0,1,2],xmm0[3]
+; AVX512BW-SLOW-NEXT:    vpsrlq $8, %zmm16, %zmm10
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm10, %xmm10
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm10[0,1],xmm0[2,3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm21
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,2,10,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm8
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm12
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm12[0],xmm8[0],xmm12[1],xmm8[1],xmm12[2],xmm8[2],xmm12[3],xmm8[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,2,10,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm5, %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm9, %xmm17
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm17[0],xmm13[0],xmm17[1],xmm13[1],xmm17[2],xmm13[2],xmm17[3],xmm13[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm8, %ymm10, %ymm13
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,2,10,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm15, %xmm17
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm22, %xmm8
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm17[0],xmm8[1],xmm17[1],xmm8[2],xmm17[2],xmm8[3],xmm17[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm21, %xmm18
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm20, %xmm17
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm17[0],xmm18[0],xmm17[1],xmm18[1],xmm17[2],xmm18[2],xmm17[3],xmm18[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm8, %ymm1, %ymm14
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3,4,5],ymm13[6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm7, %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm11, %xmm12
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm12[0,1,2],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $16, %zmm16, %zmm12
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm12[0,1],xmm0[2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm24
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm10
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,2,10,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm7, %xmm14
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm9, %xmm15
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm14[0,1,2,3,4,5,6],ymm5[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,2,10,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm13, %xmm15
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm12, %xmm14
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm11, %xmm17
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm19, %xmm15
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm15[0],xmm17[0],xmm15[1],xmm17[1],xmm15[2],xmm17[2],xmm15[3],xmm17[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3,4],ymm14[5],ymm15[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm14[0,1,2,3,4,5],ymm5[6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm14
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm4, %xmm14
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm8, %xmm10
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm10[0,1,2],xmm0[3]
+; AVX512BW-SLOW-NEXT:    vpsrlq $16, %zmm16, %zmm10
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm10, %xmm10
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm10[0,1],xmm0[2,3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm22
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,3,11,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm8
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm12
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm12[0],xmm8[0],xmm12[1],xmm8[1],xmm12[2],xmm8[2],xmm12[3],xmm8[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,3,11,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm5, %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm9, %xmm14
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm8, %ymm10, %ymm13
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,3,11,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm15, %xmm14
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm22, %xmm8
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm21, %xmm17
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm20, %xmm14
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm14[0],xmm17[0],xmm14[1],xmm17[1],xmm14[2],xmm17[2],xmm14[3],xmm17[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm8, %ymm1, %ymm14
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3,4,5],ymm13[6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm7, %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm11, %xmm12
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm12[0,1,2],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $24, %zmm16, %zmm12
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm12[0,1],xmm0[2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm25
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm10
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,3,11,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm7, %xmm14
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm9, %xmm15
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm14[0,1,2,3,4,5,6],ymm5[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,3,11,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm13, %xmm15
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm12, %xmm14
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm11, %xmm17
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm19, %xmm15
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm15[0],xmm17[0],xmm15[1],xmm17[1],xmm15[2],xmm17[2],xmm15[3],xmm17[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3,4],ymm14[5],ymm15[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm14[0,1,2,3,4,5],ymm5[6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm14
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm4, %xmm14
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm8, %xmm10
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm10[0,1,2],xmm0[3]
+; AVX512BW-SLOW-NEXT:    vpsrlq $24, %zmm16, %zmm10
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm10, %xmm10
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm10[0,1],xmm0[2,3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm23
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,4,12,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm8
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm12
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm12[0],xmm8[0],xmm12[1],xmm8[1],xmm12[2],xmm8[2],xmm12[3],xmm8[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,4,12,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm5, %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm9, %xmm14
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm8, %ymm10, %ymm13
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,4,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm15, %xmm14
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm22, %xmm8
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm21, %xmm17
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm20, %xmm14
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm10
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,4,12,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm7, %xmm14
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm9, %xmm17
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm17[0],xmm14[0],xmm17[1],xmm14[1],xmm17[2],xmm14[2],xmm17[3],xmm14[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm14[0,1,2,3,4,5,6],ymm5[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,4,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm13, %xmm17
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm12, %xmm14
 ; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm14[0],xmm17[0],xmm14[1],xmm17[1],xmm14[2],xmm17[2],xmm14[3],xmm17[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm8, %ymm1, %ymm14
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3,4,5],ymm13[6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm7, %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm11, %xmm12
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm12[0,1,2],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $32, %zmm16, %zmm12
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm12[0,1],xmm0[2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm26
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm11, %xmm18
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm19, %xmm17
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm17 = xmm17[0],xmm18[0],xmm17[1],xmm18[1],xmm17[2],xmm18[2],xmm17[3],xmm18[3]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm15
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3,4],ymm14[5],ymm15[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm14[0,1,2,3,4,5],ymm5[6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm14
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm4, %xmm14
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm8, %xmm10
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm10[0,1,2],xmm0[3]
+; AVX512BW-SLOW-NEXT:    vpsrlq $32, %zmm16, %zmm10
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm10, %xmm10
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm10[0,1],xmm0[2,3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm24
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,5,13,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm8
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm12
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm12[0],xmm8[0],xmm12[1],xmm8[1],xmm12[2],xmm8[2],xmm12[3],xmm8[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,5,13,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm5, %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm9, %xmm14
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm8, %ymm10, %ymm13
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,5,13,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm15, %xmm14
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm22, %xmm8
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm21, %xmm17
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm20, %xmm14
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm14[0],xmm17[0],xmm14[1],xmm17[1],xmm14[2],xmm17[2],xmm14[3],xmm17[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm8, %ymm1, %ymm14
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm14[0,1,2,3,4,5],ymm13[6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm7, %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm11, %xmm12
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm12[0,1,2],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $40, %zmm16, %zmm12
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm12[0,1],xmm0[2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm27
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm10
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,u,u,u,5,13,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm7, %xmm15
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm9, %xmm17
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm17[0],xmm15[0],xmm17[1],xmm15[1],xmm17[2],xmm15[2],xmm17[3],xmm15[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm15[0,1,2,3,4,5,6],ymm5[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,5,13,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm13, %xmm17
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm12, %xmm15
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm15[0],xmm17[0],xmm15[1],xmm17[1],xmm15[2],xmm17[2],xmm15[3],xmm17[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm11, %xmm18
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm19, %xmm17
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm17 = xmm17[0],xmm18[0],xmm17[1],xmm18[1],xmm17[2],xmm18[2],xmm17[3],xmm18[3]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm14
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm14 = ymm14[0,1,2,3,4],ymm15[5],ymm14[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm14[0,1,2,3,4,5],ymm5[6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm14
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm4, %xmm14
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm8, %xmm10
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm10[0,1,2],xmm0[3]
+; AVX512BW-SLOW-NEXT:    vpsrlq $40, %zmm16, %zmm10
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm10, %xmm10
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm10[0,1],xmm0[2,3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm25
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,6,14,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm12
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm13
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,u,u,6,14,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm13, %xmm5, %xmm14
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm13, %xmm9, %xmm17
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm17[0],xmm14[0],xmm17[1],xmm14[1],xmm17[2],xmm14[2],xmm17[3],xmm14[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm12, %ymm10, %ymm14
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,6,14,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm15, %xmm17
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm22, %xmm12
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm12[0],xmm17[0],xmm12[1],xmm17[1],xmm12[2],xmm17[2],xmm12[3],xmm17[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm10
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm14
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm14[0],xmm10[0],xmm14[1],xmm10[1],xmm14[2],xmm10[2],xmm14[3],xmm10[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm10
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,u,6,14,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm7, %xmm15
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm9, %xmm17
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm17[0],xmm15[0],xmm17[1],xmm15[1],xmm17[2],xmm15[2],xmm17[3],xmm15[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm15[0,1,2,3,4,5,6],ymm10[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,6,14,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm13, %xmm17
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm12, %xmm15
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm15[0],xmm17[0],xmm15[1],xmm17[1],xmm15[2],xmm17[2],xmm15[3],xmm17[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm15, %ymm0, %ymm15
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm21, %xmm18
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm20, %xmm17
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm17[0],xmm18[0],xmm17[1],xmm18[1],xmm17[2],xmm18[2],xmm17[3],xmm18[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm12, %ymm1, %ymm8
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm14[6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm12
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm13, %xmm7, %xmm12
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm13, %xmm11, %xmm13
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm12[0,1,2],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $48, %zmm16, %zmm12
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm12[0,1],xmm0[2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm8[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,7,15,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm2, %xmm2
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,7,15,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm5, %xmm5
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm9, %xmm9
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm2, %ymm10, %ymm5
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,7,15,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm15, %xmm9
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm22, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1],xmm2[2],xmm9[2],xmm2[3],xmm9[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm11, %xmm18
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm19, %xmm17
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm17 = xmm17[0],xmm18[0],xmm17[1],xmm18[1],xmm17[2],xmm18[2],xmm17[3],xmm18[3]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $1, %xmm17, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4],ymm15[5],ymm5[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm10[6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm10
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm4, %xmm10
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm8, %xmm14
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm14[0],xmm10[0],xmm14[1],xmm10[1],xmm14[2],xmm10[2],xmm14[3],xmm10[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm10[0,1,2],xmm0[3]
+; AVX512BW-SLOW-NEXT:    vpsrlq $48, %zmm16, %zmm10
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm10, %xmm10
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm10[0,1],xmm0[2,3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,7,15,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm2, %xmm2
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm6, %xmm6
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,7,15,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm7, %xmm7
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm9, %xmm9
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3,4,5,6],ymm2[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,7,15,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm13, %xmm9
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm12, %xmm7
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm21, %xmm10
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm20, %xmm9
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm11, %xmm10
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm19, %xmm9
 ; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm2, %ymm1, %ymm9
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm9[0,1,2,3,4,5],ymm5[6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm3, %xmm2
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm6, %xmm3
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm7, %xmm3
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm11, %xmm4
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1,2,3,4],ymm7[5],ymm9[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3,4,5],ymm2[6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm4, %xmm3
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm8, %xmm4
 ; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
 ; AVX512BW-SLOW-NEXT:    vpsrlq $56, %zmm16, %zmm3
 ; AVX512BW-SLOW-NEXT:    vpmovqb %zmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm23, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm19, (%rdx)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm24, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm25, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm26, (%r9)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm27, (%r11)
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm20, (%rsi)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm21, (%rdx)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm22, (%rcx)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm23, (%r8)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm24, (%r9)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm25, (%r11)
 ; AVX512BW-SLOW-NEXT:    vmovdqa %ymm0, (%r10)
 ; AVX512BW-SLOW-NEXT:    vmovdqa %ymm1, (%rax)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
@@ -10550,669 +10631,730 @@ define void @load_i8_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ;
 ; AVX512F-SLOW-LABEL: load_i8_stride8_vf64:
 ; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    subq $568, %rsp # imm = 0x238
-; AVX512F-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm16
+; AVX512F-SLOW-NEXT:    subq $456, %rsp # imm = 0x1C8
+; AVX512F-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm27
 ; AVX512F-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm0
-; AVX512F-SLOW-NEXT:    vpmovqb %zmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa 496(%rdi), %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,u,u,0,8,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm3, %xmm7
+; AVX512F-SLOW-NEXT:    vpmovqb %zmm0, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa 496(%rdi), %xmm15
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,0,8,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm15, %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa 480(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm4, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512F-SLOW-NEXT:    vmovdqa 464(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,0,8,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm5, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm5, %xmm9
-; AVX512F-SLOW-NEXT:    vmovdqa 448(%rdi), %xmm6
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm6, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, %xmm10
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm4, %xmm9
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vmovdqa 464(%rdi), %xmm13
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,0,8,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm13, %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa 448(%rdi), %xmm14
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm14, %xmm5
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm26 = [47244640258,47244640258,47244640258,47244640258]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm2, %ymm26, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm2
-; AVX512F-SLOW-NEXT:    vpmovqb %ymm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm25 = [38654705664,38654705664,38654705664,38654705664]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm0, %ymm25, %ymm2
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm4[6,7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5,6],ymm3[7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm4
+; AVX512F-SLOW-NEXT:    vpmovqb %ymm4, %xmm4
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4],ymm2[5],ymm4[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa 368(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm18
-; AVX512F-SLOW-NEXT:    vmovdqa 352(%rdi), %xmm12
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm12, %xmm4
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm31
+; AVX512F-SLOW-NEXT:    vmovdqa 352(%rdi), %xmm11
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm11, %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa 336(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm5, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm5, %xmm4
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm19
-; AVX512F-SLOW-NEXT:    vmovdqa 320(%rdi), %xmm6
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm6, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm22
+; AVX512F-SLOW-NEXT:    vmovdqa 320(%rdi), %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm5, %xmm5
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm4[0,1,2],xmm2[3]
-; AVX512F-SLOW-NEXT:    vpmovqb %zmm16, %xmm4
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm2
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3]
+; AVX512F-SLOW-NEXT:    vpmovqb %zmm27, %xmm4
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm0, %zmm3
 ; AVX512F-SLOW-NEXT:    movb $-64, %al
 ; AVX512F-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm2 {%k1}
-; AVX512F-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm11
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm11, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm4
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm3 {%k1}
+; AVX512F-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm22
 ; AVX512F-SLOW-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm4, %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm12
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm12, %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512F-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm5, %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm24
 ; AVX512F-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm6
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm6, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm23
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm6, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm16
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm5
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm0, %ymm26, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm0
-; AVX512F-SLOW-NEXT:    vpmovqb %ymm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm5, %ymm25, %ymm0
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm4[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm13
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm13, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm14
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm14, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5,6],ymm2[7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm5
+; AVX512F-SLOW-NEXT:    vpmovqb %ymm5, %xmm5
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5],ymm5[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm2[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm20
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm29
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm5, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm24
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm5, %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm21
 ; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm5, %xmm3
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm17
-; AVX512F-SLOW-NEXT:    vpmovqb %zmm17, %xmm3
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm30
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm4
+; AVX512F-SLOW-NEXT:    vpmovqb %zmm4, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, %zmm18
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,u,u,1,9,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm7, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm7, %xmm20
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm8, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm8, %xmm27
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,1,9,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm29
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm10, %xmm31
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,1,9,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm15, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm9, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm26
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,1,9,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm13, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm14, %xmm4
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm26, %ymm3
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm2[7]
 ; AVX512F-SLOW-NEXT:    vmovdqa 416(%rdi), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa 432(%rdi), %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa 432(%rdi), %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,1,9,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm5, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm21
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm1, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm25
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm0, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm30
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm23
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa 384(%rdi), %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 400(%rdi), %xmm15
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm15, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm0, %xmm10
+; AVX512F-SLOW-NEXT:    vmovdqa 400(%rdi), %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm1, %xmm7
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm28
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm0, %xmm10
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm10[0],xmm7[0],xmm10[1],xmm7[1],xmm10[2],xmm7[2],xmm10[3],xmm7[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm5, %ymm25, %ymm7
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm7[0,1,2,3,4,5],ymm3[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm18, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm0, %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm12, %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm12, %xmm28
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3,4],ymm5[5],ymm7[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3,4,5],ymm3[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm0, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm11, %xmm7
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm9
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm12
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm12, %xmm10
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm9, %xmm7
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm0, %xmm10
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm10[0],xmm7[0],xmm10[1],xmm7[1],xmm10[2],xmm7[2],xmm10[3],xmm7[3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm7[0,1,2],xmm5[3]
-; AVX512F-SLOW-NEXT:    vpsrlq $8, %zmm16, %zmm7
+; AVX512F-SLOW-NEXT:    vpsrlq $8, %zmm27, %zmm7
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm7, %xmm7
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3]
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm5, %zmm0, %zmm19
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm19 {%k1}
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm0, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm11, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm0, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm12, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm12, %xmm22
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm0, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm10
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm7
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm3, %ymm26, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa 160(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 176(%rdi), %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm1, %xmm4
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm12
+; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm12, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm0, %xmm7
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3,4,5,6],ymm3[7]
+; AVX512F-SLOW-NEXT:    vmovdqa 160(%rdi), %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 176(%rdi), %xmm1
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm1, %xmm10
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm17
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm0, %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa 128(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa 144(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm4, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm4, (%rsp) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm22
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm3, %ymm25, %ymm1
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm13, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm13, %xmm23
-; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm14, %xmm3
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm14
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm14, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 144(%rdi), %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm16
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm5, (%rsp) # 16-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm1[5],ymm0[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm7
+; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm7, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm2
+; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm2, %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm10
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm10, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3]
-; AVX512F-SLOW-NEXT:    vpsrlq $8, %zmm17, %zmm2
+; AVX512F-SLOW-NEXT:    vpsrlq $8, %zmm18, %zmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm18, %zmm21
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm19, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,u,u,2,10,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm11
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm11, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm13
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm13, %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,2,10,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm0, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm0, %xmm6
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm26, %ymm6
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm15, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm15, %xmm24
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm0, %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,2,10,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm13, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm13, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm14, %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm14, %xmm19
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm3[0,1,2,3,4,5,6],ymm2[7]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,2,10,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm5, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm0, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm15, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm15, %xmm20
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm7, %xmm15
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm14
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm14, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm13
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm13, %xmm8
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm15, %xmm15
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm8, %ymm25, %ymm0
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm8[5],ymm0[6,7]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm6[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm6
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm7, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm11, %xmm8
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm21
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm12, %xmm15
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm12, %xmm27
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm9, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm9, %xmm15
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm8[0,1,2],xmm6[3]
-; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm16, %zmm8
+; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm27, %zmm8
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm8, %xmm8
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm8[0,1],xmm6[2,3]
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm0, %zmm6
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm6 {%k1}
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm7, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm7, %xmm18
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm7, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm11
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm11, %xmm8
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm15
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm12, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm12, %xmm20
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm12, %xmm15
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm0, %ymm26, %ymm8
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm9, %xmm3
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm9
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm9, %xmm1
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0,1,2,3,4,5,6],ymm0[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm12, %xmm3
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm12, %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm3[5],ymm2[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm7, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm7, %xmm18
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm7
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm7, %xmm3
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm10, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm10
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm10, %xmm1
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm0, %ymm25, %ymm1
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm8[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm14, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm19
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3]
-; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm17, %zmm2
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3]
+; AVX512F-SLOW-NEXT:    vpsrlq $16, %zmm21, %zmm2
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm6, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,3,11,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm11, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm13, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm13, %xmm22
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,3,11,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm11
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm11, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm9
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm4
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm26, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,3,11,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm12
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm12, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,3,11,u,u,u,u,u,u,u,u,u,u>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm5, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm12
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm12, %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm29
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm5, %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0,1,2,3,4,5,6],ymm2[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,3,11,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm14, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm13, %xmm6
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm5, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm5, %xmm15
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm13
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm13, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm14, %xmm15
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm6, %ymm25, %ymm8
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3,4,5],ymm4[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm13, %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm14
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm14, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm15
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm15, %xmm15
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3,4],ymm6[5],ymm8[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm4[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm6
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm9, %xmm15
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm22
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm8[0,1,2],xmm6[3]
-; AVX512F-SLOW-NEXT:    vpsrlq $24, %zmm16, %zmm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm16, %zmm30
+; AVX512F-SLOW-NEXT:    vpsrlq $24, %zmm27, %zmm8
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm8, %xmm8
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm8[0,1],xmm6[2,3]
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm0, %zmm6
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm6 {%k1}
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm11, %xmm8
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm15
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm9, %xmm15
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm4, %ymm26, %ymm8
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm7, %xmm1
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsp), %xmm7 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm7, %xmm3
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm25, %ymm3
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm8[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3,4,5,6],ymm4[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm14
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm14, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsp), %xmm8 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm9, %xmm3
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5],ymm3[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm4[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm0
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm10, %xmm23
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
-; AVX512F-SLOW-NEXT:    vpsrlq $24, %zmm17, %zmm2
-; AVX512F-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm10, %xmm1
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
+; AVX512F-SLOW-NEXT:    vpsrlq $24, %zmm21, %zmm1
+; AVX512F-SLOW-NEXT:    vpmovqb %zmm1, %xmm1
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm6, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,4,12,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm2
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm12, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,4,12,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm11, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm4
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm26, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,4,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,4,12,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm3
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm12, %xmm6
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm5, %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0,1,2,3,4,5,6],ymm2[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,4,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm6
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm9
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm9, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm10, %xmm15
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm13, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm24
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm9, %xmm15
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm6, %ymm25, %ymm8
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3,4,5],ymm4[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm13, %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm13, %xmm31
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm14, %xmm8
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3,4],ymm6[5],ymm8[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm4[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm11
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm11, %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm8
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm15
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm7, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm7
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm7, %xmm15
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm16
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm8[0,1,2],xmm6[3]
-; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm16, %zmm8
+; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm27, %zmm8
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm8, %xmm8
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm8[0,1],xmm6[2,3]
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm0, %zmm6
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm6 {%k1}
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm11, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm4
 ; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm15
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm4, %ymm26, %ymm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm10
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm10, %xmm8
 ; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm12, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm14, %xmm1
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm12, %xmm15
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3,4,5,6],ymm4[7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm14, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm13, %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsp), %xmm8 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm13, %xmm3
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5],ymm3[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm4[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm14
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm14, %xmm3
 ; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsp), %xmm10 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm10, %xmm3
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm25, %ymm3
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm8[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm7, %xmm24
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm10
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
-; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm17, %zmm2
-; AVX512F-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
+; AVX512F-SLOW-NEXT:    vpsrlq $32, %zmm21, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm21, %zmm22
+; AVX512F-SLOW-NEXT:    vpmovqb %zmm1, %xmm1
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm6, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,5,13,u,u,u,u,u,u,u,u>
 ; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm13
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm13, %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,5,13,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm26, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,5,13,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,5,13,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm3
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm6, %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0,1,2,3,4,5,6],ymm2[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,5,13,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm13
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm13, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm25
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm9, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm28
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm5, %xmm8
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm9, %xmm15
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm6, %ymm25, %ymm8
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3,4,5],ymm4[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm6
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3,4],ymm6[5],ymm8[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm4[6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm11, %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm26
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm8
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm15
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm29
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm9, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm11
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm11, %xmm15
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm8[0,1,2],xmm6[3]
-; AVX512F-SLOW-NEXT:    vpsrlq $40, %zmm16, %zmm8
+; AVX512F-SLOW-NEXT:    vpsrlq $40, %zmm27, %zmm8
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm8, %xmm8
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm8[0,1],xmm6[2,3]
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm0, %zmm6
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm6 {%k1}
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm11, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm11, %xmm22
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm7, %xmm28
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm8
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm15
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm10, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm30
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm12, %xmm15
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm12, %xmm31
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm4, %ymm26, %ymm8
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm12, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm12, %xmm21
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm14, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm14, %xmm27
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3,4,5,6],ymm4[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm11
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm11, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsp), %xmm7 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm7, %xmm8
 ; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm12, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsp), %xmm11 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm11, %xmm3
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm25, %ymm3
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm8[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm7, %xmm18
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm12, %xmm3
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5],ymm3[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm4[6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm14, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm14, %xmm20
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm16
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
-; AVX512F-SLOW-NEXT:    vpsrlq $40, %zmm17, %zmm2
-; AVX512F-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm6, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm14, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
+; AVX512F-SLOW-NEXT:    vpsrlq $40, %zmm21, %zmm1
+; AVX512F-SLOW-NEXT:    vpmovqb %zmm1, %xmm1
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm6, %zmm23
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,6,14,u,u,u,u,u,u,u,u>
 ; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm13, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm13, %xmm23
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,6,14,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,6,14,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm21
 ; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm26, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,6,14,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm7, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm6, %xmm6
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0,1,2,3,4,5,6],ymm2[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,6,14,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm13, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm13, %xmm16
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm6
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm6, %xmm6
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm9, %xmm15
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm13
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm13, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm15, %xmm15
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm6, %ymm25, %ymm8
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3,4,5],ymm4[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm6
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3,4],ymm6[5],ymm8[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm4[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm6
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm18
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm10
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm15
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm9, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm17
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm5, %xmm15
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm8[0,1,2],xmm6[3]
-; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm30, %zmm8
+; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm27, %zmm8
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm8, %xmm8
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm8[0,1],xmm6[2,3]
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm0, %zmm6
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm6 {%k1}
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm10, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm8
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm13, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm14, %xmm15
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm9
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm9, %xmm15
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm4, %ymm26, %ymm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm15
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm15, %xmm1
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm12, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm12, %xmm19
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm11, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm11, %xmm20
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm25, %ymm3
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm8[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm15
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm15, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm11
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3,4,5,6],ymm4[7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm11, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm11, %xmm19
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm10, %xmm24
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm7, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm7, %xmm29
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm12, %xmm3
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5],ymm3[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm4[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm10
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm10, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm11, %xmm0
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm12
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm12, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
-; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm17, %zmm2
-; AVX512F-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm6, %zmm24
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm14, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm7, %xmm1
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
+; AVX512F-SLOW-NEXT:    vpsrlq $48, %zmm22, %zmm1
+; AVX512F-SLOW-NEXT:    vpmovqb %zmm1, %xmm1
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm6, %zmm20
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,7,15,u,u,u,u,u,u,u,u>
 ; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,7,15,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,7,15,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm3
 ; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm26, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,7,15,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm7, %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm6, %xmm6
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0,1,2,3,4,5,6],ymm2[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,7,15,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm6
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm6, %xmm6
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm7, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm13, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm9, %xmm9
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm6, %ymm25, %ymm8
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3,4,5],ymm4[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm6
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3,4],ymm6[5],ymm8[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm4[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm6
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm8
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm9
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm9, %xmm9
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm8[0,1,2],xmm6[3]
-; AVX512F-SLOW-NEXT:    vpsrlq $56, %zmm30, %zmm8
+; AVX512F-SLOW-NEXT:    vpsrlq $56, %zmm27, %zmm8
 ; AVX512F-SLOW-NEXT:    vpmovqb %zmm8, %xmm8
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm6 = xmm8[0,1],xmm6[2,3]
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm6, %zmm0, %zmm6
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm6 {%k1}
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm10, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm8
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm13, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm14, %xmm9
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm5, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm5, %xmm9
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm4, %ymm26, %ymm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm7, %xmm1
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm7, %xmm3
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vpermt2d %ymm1, %ymm25, %ymm3
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm8[6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm15, %xmm3
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0,1,2,3,4,5,6],ymm4[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm8, %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm5, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm12, %xmm3
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4],ymm2[5],ymm3[6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm4[6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm10, %xmm3
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm11, %xmm0
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm12, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
-; AVX512F-SLOW-NEXT:    vpsrlq $56, %zmm17, %zmm2
-; AVX512F-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm14, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm7, %xmm1
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
+; AVX512F-SLOW-NEXT:    vpsrlq $56, %zmm22, %zmm1
+; AVX512F-SLOW-NEXT:    vpmovqb %zmm1, %xmm1
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm6, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-SLOW-NEXT:    vmovaps %zmm1, (%rsi)
@@ -11225,13 +11367,12 @@ define void @load_i8_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX512F-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
 ; AVX512F-SLOW-NEXT:    vmovaps %zmm1, (%r9)
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vmovaps %zmm1, (%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm23, (%rax)
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm24, (%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm20, (%rax)
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512F-SLOW-NEXT:    addq $568, %rsp # imm = 0x238
+; AVX512F-SLOW-NEXT:    addq $456, %rsp # imm = 0x1C8
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
@@ -11804,571 +11945,685 @@ define void @load_i8_stride8_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ;
 ; AVX512BW-SLOW-LABEL: load_i8_stride8_vf64:
 ; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    subq $456, %rsp # imm = 0x1C8
-; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm8
-; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-SLOW-NEXT:    subq $744, %rsp # imm = 0x2E8
+; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm3
+; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 128(%rdi), %zmm0
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm0, %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm12
-; AVX512BW-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm0
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm0, %xmm3
-; AVX512BW-SLOW-NEXT:    vmovdqa 496(%rdi), %xmm13
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,0,8,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm13, %xmm2
-; AVX512BW-SLOW-NEXT:    vmovdqa 480(%rdi), %xmm10
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm10, %xmm4
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa 464(%rdi), %xmm15
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,0,8,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm15, %xmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa64 448(%rdi), %xmm16
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm16, %xmm6
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX512BW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm11 = [47244640258,47244640258,47244640258,47244640258]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm4, %ymm11, %ymm5
-; AVX512BW-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm4
-; AVX512BW-SLOW-NEXT:    vpmovqb %ymm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm27 = [38654705664,38654705664,38654705664,38654705664]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm3, %ymm27, %ymm4
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4,5],ymm5[6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa 368(%rdi), %xmm5
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm4
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm22
-; AVX512BW-SLOW-NEXT:    vmovdqa 352(%rdi), %xmm5
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm0, %xmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa64 256(%rdi), %zmm1
+; AVX512BW-SLOW-NEXT:    vmovdqa64 384(%rdi), %zmm2
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
+; AVX512BW-SLOW-NEXT:    vmovdqa 496(%rdi), %xmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,u,u,u,u,0,8,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm5, %xmm4
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm5, %xmm7
 ; AVX512BW-SLOW-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa 480(%rdi), %xmm6
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm6, %xmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm25
 ; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa 336(%rdi), %xmm14
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm14, %xmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa64 320(%rdi), %xmm29
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm29, %xmm7
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3]
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm12, %xmm5
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm0, %zmm6
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-SLOW-NEXT:    vmovdqa 464(%rdi), %xmm6
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <u,u,u,u,0,8,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm19, %xmm6, %xmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm24
+; AVX512BW-SLOW-NEXT:    vmovdqa 448(%rdi), %xmm6
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm19, %xmm6, %xmm8
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm26
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm8[0],xmm5[0],xmm8[1],xmm5[1],xmm8[2],xmm5[2],xmm8[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4,5,6],ymm4[7]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm5
+; AVX512BW-SLOW-NEXT:    vpmovqb %ymm5, %xmm5
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3,4],ymm2[5],ymm5[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm2[0,1,2,3,4,5],ymm4[6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa 368(%rdi), %xmm2
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm2, %xmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm31
+; AVX512BW-SLOW-NEXT:    vmovdqa 352(%rdi), %xmm2
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm2, %xmm8
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm2, %xmm9
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm8[0],xmm5[0],xmm8[1],xmm5[1],xmm8[2],xmm5[2],xmm8[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa 336(%rdi), %xmm2
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm19, %xmm2, %xmm12
+; AVX512BW-SLOW-NEXT:    vmovdqa 320(%rdi), %xmm2
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm19, %xmm2, %xmm13
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm11 = xmm12[0,1,2],xmm11[3]
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm1, %xmm12
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm1, %zmm6
+; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm11 = xmm12[0,1],xmm11[2,3]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm11, %zmm0, %zmm20
 ; AVX512BW-SLOW-NEXT:    movb $-64, %al
 ; AVX512BW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm6 {%k1}
-; AVX512BW-SLOW-NEXT:    vmovdqa64 240(%rdi), %xmm17
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm17, %xmm3
-; AVX512BW-SLOW-NEXT:    vmovdqa64 224(%rdi), %xmm18
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm18, %xmm4
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm20 {%k1}
+; AVX512BW-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm1
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm1, %xmm11
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm1, %xmm8
+; AVX512BW-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm1
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm1, %xmm13
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm1, %xmm14
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm11, %ymm0, %ymm11
+; AVX512BW-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm1
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm19, %xmm1, %xmm13
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm17
+; AVX512BW-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm1
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm19, %xmm1, %xmm18
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm29
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm18[0],xmm13[0],xmm18[1],xmm13[1],xmm18[2],xmm13[2],xmm18[3],xmm13[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm13[0,1,2,3,4,5,6],ymm11[7]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512BW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm13
+; AVX512BW-SLOW-NEXT:    vpmovqb %ymm13, %xmm13
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm13
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1,2,3,4],ymm0[5],ymm13[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0,1,2,3,4,5],ymm11[6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa 112(%rdi), %xmm0
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm0, %xmm18
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm23
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm15, %xmm0, %xmm15
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm15[0],xmm18[0],xmm15[1],xmm18[1],xmm15[2],xmm18[2],xmm15[3],xmm18[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm2
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm19, %xmm2, %xmm21
+; AVX512BW-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm2
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm19, %xmm2, %xmm19
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm2, %xmm15
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm19[0],xmm21[0],xmm19[1],xmm21[1],xmm19[2],xmm21[2],xmm19[3],xmm21[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm3, %xmm2
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm20, %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-SLOW-NEXT:    vmovdqa 160(%rdi), %xmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa 384(%rdi), %xmm4
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vmovdqa 400(%rdi), %xmm12
+; AVX512BW-SLOW-NEXT:    vmovdqa 416(%rdi), %xmm13
+; AVX512BW-SLOW-NEXT:    vmovdqa64 432(%rdi), %xmm16
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,u,1,9,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm7, %xmm1
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm10
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm25, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm25, %xmm2
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,1,9,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm19
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm24, %xmm24
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm26, %xmm25
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm24 = xmm25[0],xmm24[0],xmm25[1],xmm24[1],xmm25[2],xmm24[2],xmm25[3],xmm24[3]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $1, %xmm24, %ymm0, %ymm3
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5,6],ymm1[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm27 = <u,u,1,9,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm27, %xmm16, %xmm24
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm22
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm27, %xmm13, %xmm25
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm13, %xmm18
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm24 = xmm25[0],xmm24[0],xmm25[1],xmm24[1],xmm25[2],xmm24[2],xmm25[3],xmm24[3]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $1, %xmm24, %ymm0, %ymm3
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm30 = <1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm30, %xmm12, %xmm24
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm12, %xmm28
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm30, %xmm4, %xmm25
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm24 = xmm25[0],xmm24[0],xmm25[1],xmm24[1],xmm25[2],xmm24[2],xmm25[3],xmm24[3]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $1, %xmm24, %ymm0, %ymm4
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3,4],ymm3[5],ymm4[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm31, %xmm3
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm13
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm9, %xmm7
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm9, %xmm4
 ; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 208(%rdi), %xmm19
+; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm11, %xmm4
+; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm9, %xmm24
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm24[0],xmm4[0],xmm24[1],xmm4[1],xmm24[2],xmm4[2],xmm24[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3]
+; AVX512BW-SLOW-NEXT:    vpsrlq $8, %zmm6, %zmm4
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm4, %xmm4
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm0, %zmm3
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm3 {%k1}
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm8, %xmm1
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm8, %xmm12
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm14, %xmm4
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm14, %xmm16
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm20
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm17, %xmm4
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm29, %xmm24
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm31
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm24[0],xmm4[0],xmm24[1],xmm4[1],xmm24[2],xmm4[2],xmm24[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 176(%rdi), %xmm25
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5,6],ymm1[7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm27, %xmm25, %xmm4
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm0, %xmm6
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm27, %xmm0, %xmm24
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm24[0],xmm4[0],xmm24[1],xmm4[1],xmm24[2],xmm4[2],xmm24[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa 128(%rdi), %xmm8
+; AVX512BW-SLOW-NEXT:    vmovdqa 144(%rdi), %xmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm30, %xmm0, %xmm0
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm30, %xmm8, %xmm30
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm8, %xmm24
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm30[0],xmm0[0],xmm30[1],xmm0[1],xmm30[2],xmm0[2],xmm30[3],xmm0[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5],ymm0[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm23, %xmm1
+; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm14, %xmm4
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm21 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm21, %xmm4
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm15, %xmm2
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm15, %xmm17
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3]
+; AVX512BW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512BW-SLOW-NEXT:    vpsrlq $8, %zmm15, %zmm2
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,u,u,2,10,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm1, %xmm0, %xmm2
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm1, %xmm10, %xmm3
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,2,10,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm19, %xmm4
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm26, %xmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm8
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5,6],ymm2[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,2,10,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm22, %xmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm23
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm18, %xmm30
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm22
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm30[0],xmm5[0],xmm30[1],xmm5[1],xmm30[2],xmm5[2],xmm30[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm30 = <2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm30, %xmm28, %xmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm10
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm28, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm18 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm30, %xmm18, %xmm26
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm26[0],xmm0[0],xmm26[1],xmm0[1],xmm26[2],xmm0[2],xmm26[3],xmm0[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm5[5],ymm0[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm2[6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm13, %xmm29
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm1, %xmm13, %xmm2
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm1, %xmm7, %xmm5
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm11, %xmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm27
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm9, %xmm26
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm26[0],xmm5[0],xmm26[1],xmm5[1],xmm26[2],xmm5[2],xmm26[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm5[0,1,2],xmm2[3]
+; AVX512BW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512BW-SLOW-NEXT:    vpsrlq $16, %zmm13, %zmm5
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm5, %xmm5
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm2
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm2 {%k1}
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm1, %xmm12, %xmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm12, %xmm7
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm1, %xmm16, %xmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm16, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm20, %xmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm31, %xmm26
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm26[0],xmm5[0],xmm26[1],xmm5[1],xmm26[2],xmm5[2],xmm26[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3,4,5,6],ymm0[7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm25, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm6, %xmm4
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm30, %xmm5, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm30, %xmm24, %xmm26
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm26[0],xmm5[0],xmm26[1],xmm5[1],xmm26[2],xmm5[2],xmm26[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5],ymm5[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm0[6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rsp), %xmm11 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm1, %xmm11, %xmm4
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm1, %xmm14, %xmm1
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm21, %xmm4
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm17, %xmm3
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm28
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm15, %zmm9
+; AVX512BW-SLOW-NEXT:    vpsrlq $16, %zmm15, %zmm3
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm3, %xmm3
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,3,11,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm14, %xmm1
+; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm15, %xmm3
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,3,11,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm19, %xmm4
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm8, %xmm5
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5,6],ymm1[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,3,11,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm23, %xmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm22, %xmm26
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm26[0],xmm5[0],xmm26[1],xmm5[1],xmm26[2],xmm5[2],xmm26[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm26 = <3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm10, %xmm30
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm18, %xmm2
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm30[0],xmm2[1],xmm30[1],xmm2[2],xmm30[2],xmm2[3],xmm30[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4],ymm5[5],ymm2[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm29, %xmm2
+; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm10, %xmm5
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm17 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm17, %xmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm12
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm27, %xmm30
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm30[0],xmm5[0],xmm30[1],xmm5[1],xmm30[2],xmm5[2],xmm30[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm5[0,1,2],xmm2[3]
+; AVX512BW-SLOW-NEXT:    vpsrlq $24, %zmm13, %zmm5
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm5, %xmm5
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm5[0,1],xmm2[2,3]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm2
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm2 {%k1}
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm7, %xmm24
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm1
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm16, %xmm5
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm20, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm31, %xmm30
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm7
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm31, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm30[0],xmm5[0],xmm30[1],xmm5[1],xmm30[2],xmm5[2],xmm30[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5,6],ymm1[7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm25, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm6, %xmm4
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm29 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm29, %xmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm27 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm27, %xmm26
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm26[0],xmm5[0],xmm26[1],xmm5[1],xmm26[2],xmm5[2],xmm26[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5],ymm5[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm11, %xmm16
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm11, %xmm4
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm20 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm20, %xmm0
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm21, %xmm4
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm28, %xmm3
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[3]
+; AVX512BW-SLOW-NEXT:    vpsrlq $24, %zmm9, %zmm3
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm3, %xmm3
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm3[0,1],xmm0[2,3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm2, %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,4,12,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm14, %xmm1
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm14, %xmm8
+; AVX512BW-SLOW-NEXT:    vmovdqa %xmm15, %xmm14
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm15, %xmm2
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,4,12,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm19, %xmm4
-; AVX512BW-SLOW-NEXT:    vmovdqa64 192(%rdi), %xmm20
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm20, %xmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm15, %xmm5
 ; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm3, %ymm11, %ymm4
-; AVX512BW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512BW-SLOW-NEXT:    vpmovqb %ymm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm1, %ymm27, %ymm3
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm3[0,1,2,3,4,5],ymm4[6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 112(%rdi), %xmm24
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5,6],ymm1[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,4,12,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm23, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm22, %xmm26
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm26[0],xmm5[0],xmm26[1],xmm5[1],xmm26[2],xmm5[2],xmm26[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm26 = <4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm31 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm31, %xmm30
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm18, %xmm3
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm30[0],xmm3[1],xmm30[1],xmm3[2],xmm30[2],xmm3[3],xmm30[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm5[5],ymm3[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm9, %xmm3
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm10, %xmm5
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm17, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm12, %xmm30
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm30[0],xmm5[0],xmm30[1],xmm5[1],xmm30[2],xmm5[2],xmm30[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm5[0,1,2],xmm3[3]
+; AVX512BW-SLOW-NEXT:    vpsrlq $32, %zmm13, %zmm5
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm5, %xmm5
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm0, %zmm3
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm3 {%k1}
 ; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm24, %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm3
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm28
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa 80(%rdi), %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa64 64(%rdi), %xmm30
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm30, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm8, %xmm1
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm1[0,1],xmm0[2,3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,1,9,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm13, %xmm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm10, %xmm1
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,1,9,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm15, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm16, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm11, %ymm4
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm17, %xmm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm18, %xmm1
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm19, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm20, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm21 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm11, %ymm21
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,2,10,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm13, %xmm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm10, %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm7, %xmm26
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,2,10,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm15, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm16, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm23 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm11, %ymm23
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm17, %xmm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm18, %xmm1
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm19, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm20, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm11, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,3,11,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm13, %xmm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm25
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,3,11,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm15, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm16, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm11, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm25, %xmm17, %xmm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm25, %xmm18, %xmm1
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm19, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm20, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm11, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,4,12,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm13, %xmm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm2, %xmm7
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm25 = <u,u,u,u,4,12,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm25, %xmm15, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm25, %xmm16, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm11, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm17, %xmm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm18, %xmm1
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm25, %xmm19, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm25, %xmm20, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm11, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,5,13,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm13, %xmm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm25
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,5,13,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm15, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm16, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm11, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm25, %xmm17, %xmm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm25, %xmm18, %xmm1
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm19, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm20, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm11, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,u,6,14,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm13, %xmm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm1
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm25
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,6,14,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm15, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm16, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm11, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm25, %xmm17, %xmm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm25, %xmm18, %xmm1
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm19, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm20, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm11, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm31 = <u,u,u,u,u,u,7,15,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm31, %xmm13, %xmm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm31, %xmm10, %xmm1
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm26 = <u,u,u,u,7,15,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm15, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm16, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm11, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm31, %xmm17, %xmm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm31, %xmm18, %xmm1
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm19, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm20, %xmm2
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm11, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm5[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm6, %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm28 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm28, %xmm5
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm22 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm22, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm7, %xmm30
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm30[0],xmm5[0],xmm30[1],xmm5[1],xmm30[2],xmm5[2],xmm30[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5,6],ymm1[7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm25, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm6, %xmm4
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm29, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm27, %xmm26
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm26[0],xmm5[0],xmm26[1],xmm5[1],xmm26[2],xmm5[2],xmm26[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3,4],ymm4[5],ymm5[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm11, %xmm4
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm20, %xmm0
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm21, %xmm4
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm23 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm23, %xmm2
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
+; AVX512BW-SLOW-NEXT:    vpsrlq $32, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-SLOW-NEXT:    vmovdqa64 416(%rdi), %xmm17
-; AVX512BW-SLOW-NEXT:    vmovdqa64 432(%rdi), %xmm19
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,1,9,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm19, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm17, %xmm2
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,5,13,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm1
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm14, %xmm2
 ; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 384(%rdi), %xmm18
-; AVX512BW-SLOW-NEXT:    vmovdqa64 400(%rdi), %xmm20
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <1,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm20, %xmm2
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm18, %xmm3
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm1, %ymm27, %ymm2
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm4[6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm22, %xmm2
-; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm13, %xmm3
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm14, %xmm3
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm29, %xmm11
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm11[0],xmm3[0],xmm11[1],xmm3[1],xmm11[2],xmm3[2],xmm11[3],xmm3[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm12, %zmm25
-; AVX512BW-SLOW-NEXT:    vpsrlq $8, %zmm12, %zmm3
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm11
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm11 {%k1}
-; AVX512BW-SLOW-NEXT:    vmovdqa 160(%rdi), %xmm4
-; AVX512BW-SLOW-NEXT:    vmovdqa 176(%rdi), %xmm3
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm2
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm16 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa 128(%rdi), %xmm2
-; AVX512BW-SLOW-NEXT:    vmovdqa 144(%rdi), %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm1, %xmm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm2, %xmm5
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm16, %ymm27, %ymm0
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm5
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm5[6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm24, %xmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm6
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm8, %xmm28, %xmm16
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm16[0],xmm5[0],xmm16[1],xmm5[1],xmm16[2],xmm5[2],xmm16[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,5,13,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm19, %xmm3
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm15, %xmm4
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm15, %xmm19
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5,6],ymm1[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,5,13,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm8, %xmm16
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm21
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm30, %xmm30
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm30[0],xmm16[0],xmm30[1],xmm16[1],xmm30[2],xmm16[2],xmm30[3],xmm16[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm12[0,1,2],xmm5[3]
-; AVX512BW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
-; AVX512BW-SLOW-NEXT:    vpsrlq $8, %zmm9, %zmm12
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm12[0,1],xmm5[2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm11, %zmm0
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm8, %xmm4
+; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm7, %xmm5
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm31, %xmm26
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm18, %xmm30
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm26 = xmm30[0],xmm26[0],xmm30[1],xmm26[1],xmm30[2],xmm26[2],xmm30[3],xmm26[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $1, %xmm26, %ymm0, %ymm6
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4],ymm4[5],ymm6[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm9, %xmm4
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm10, %xmm6
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm11
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm17, %xmm6
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm12, %xmm26
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm26[0],xmm6[0],xmm26[1],xmm6[1],xmm26[2],xmm6[2],xmm26[3],xmm6[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vpsrlq $40, %zmm13, %zmm6
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm6, %xmm6
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm0, %zmm4
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm4 {%k1}
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm17
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm24, %xmm1
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm28, %xmm6
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm22, %xmm6
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm24 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm24, %xmm26
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm26[0],xmm6[0],xmm26[1],xmm6[1],xmm26[2],xmm6[2],xmm26[3],xmm6[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1,2,3,4,5,6],ymm1[7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm25, %xmm6
+; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm9, %xmm3
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm29, %xmm6
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm5, %xmm27, %xmm5
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3,4],ymm3[5],ymm5[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm16, %xmm3
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm20, %xmm0
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm21, %xmm3
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm23, %xmm2
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
+; AVX512BW-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512BW-SLOW-NEXT:    vpsrlq $40, %zmm14, %zmm2
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,2,10,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm19, %xmm5
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm17, %xmm11
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm11[0],xmm5[0],xmm11[1],xmm5[1],xmm11[2],xmm5[2],xmm11[3],xmm5[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <2,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm20, %xmm12
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm18, %xmm30
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm30[0],xmm12[0],xmm30[1],xmm12[1],xmm30[2],xmm12[2],xmm30[3],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm5, %ymm27, %ymm12
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm5
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm12[0,1,2,3,4,5],ymm5[6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,u,u,2,10,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm22, %xmm12
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm13, %xmm30
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm30[0],xmm12[0],xmm30[1],xmm12[1],xmm30[2],xmm12[2],xmm30[3],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm16 = <u,u,u,u,2,10,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm16, %xmm14, %xmm30
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm15
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm16, %xmm29, %xmm29
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm29[0],xmm30[0],xmm29[1],xmm30[1],xmm29[2],xmm30[2],xmm29[3],xmm30[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm10 = xmm10[0,1,2],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $16, %zmm25, %zmm12
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm10 = xmm12[0,1],xmm10[2,3]
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm10, %zmm0, %zmm10
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm10 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm5
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm1, %xmm5
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm2, %xmm11
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm11[0],xmm5[0],xmm11[1],xmm5[1],xmm11[2],xmm5[2],xmm11[3],xmm5[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm27, %ymm5
-; AVX512BW-SLOW-NEXT:    vpblendd $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm0 # 32-byte Folded Reload
-; AVX512BW-SLOW-NEXT:    # ymm0 = ymm5[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm24, %xmm5
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm7, %xmm28, %xmm11
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm11[0],xmm5[0],xmm11[1],xmm5[1],xmm11[2],xmm5[2],xmm11[3],xmm5[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm16, %xmm8, %xmm11
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm16, %xmm21, %xmm12
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm11[0,1,2],xmm5[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $16, %zmm9, %zmm11
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm9, %zmm7
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm11[0,1],xmm5[2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm10, %zmm28
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,3,11,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm19, %xmm5
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm17, %xmm10
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <3,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm20, %xmm12
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm18, %xmm29
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm29[0],xmm12[0],xmm29[1],xmm12[1],xmm29[2],xmm12[2],xmm29[3],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm5, %ymm27, %ymm12
-; AVX512BW-SLOW-NEXT:    vpblendd $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm5 # 32-byte Folded Reload
-; AVX512BW-SLOW-NEXT:    # ymm5 = ymm12[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm9
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,u,u,u,3,11,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm22, %xmm12
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm13, %xmm29
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm11, %xmm23
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm13, %xmm16
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm29[0],xmm12[0],xmm29[1],xmm12[1],xmm29[2],xmm12[2],xmm29[3],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm22 = <u,u,u,u,3,11,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm14, %xmm29
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm15, %xmm30
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm30[0],xmm29[0],xmm30[1],xmm29[1],xmm30[2],xmm29[2],xmm30[3],xmm29[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm11 = xmm11[0,1,2],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $24, %zmm25, %zmm12
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm11 = xmm12[0,1],xmm11[2,3]
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm11, %zmm0, %zmm11
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm11 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm5
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm1, %xmm5
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm2, %xmm10
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm27, %ymm5
-; AVX512BW-SLOW-NEXT:    vpblendd $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm0 # 32-byte Folded Reload
-; AVX512BW-SLOW-NEXT:    # ymm0 = ymm5[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm23, %xmm24, %xmm5
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm13
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm23, %xmm6, %xmm10
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm8, %xmm10
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm21, %xmm12
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm10[0,1,2],xmm5[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $24, %zmm7, %zmm10
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm10[0,1],xmm5[2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm11, %zmm24
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,4,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm19, %xmm10
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm17, %xmm11
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <4,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm20, %xmm12
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm18, %xmm29
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm29[0],xmm12[0],xmm29[1],xmm12[1],xmm29[2],xmm12[2],xmm29[3],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm10, %ymm27, %ymm12
-; AVX512BW-SLOW-NEXT:    vpblendd $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm10 # 32-byte Folded Reload
-; AVX512BW-SLOW-NEXT:    # ymm10 = ymm12[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm22 = <u,u,u,u,u,u,4,12,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm9, %xmm12
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm9, %xmm5
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm16, %xmm29
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm29[0],xmm12[0],xmm29[1],xmm12[1],xmm29[2],xmm12[2],xmm29[3],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,4,12,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm14, %xmm29
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm15, %xmm30
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm23
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm30[0],xmm29[0],xmm30[1],xmm29[1],xmm30[2],xmm29[2],xmm30[3],xmm29[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm9[0,1,2],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $32, %zmm25, %zmm12
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm12[0,1],xmm9[2,3]
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm9, %zmm0, %zmm9
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm9 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm10
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm1, %xmm10
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm2, %xmm11
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm27, %ymm10
-; AVX512BW-SLOW-NEXT:    vpblendd $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10, %ymm0 # 32-byte Folded Reload
-; AVX512BW-SLOW-NEXT:    # ymm0 = ymm10[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm13, %xmm10
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm6, %xmm11
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm23, %xmm8, %xmm11
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm23, %xmm21, %xmm12
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm10 = xmm11[0,1,2],xmm10[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $32, %zmm7, %zmm11
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm11, %xmm11
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm10 = xmm11[0,1],xmm10[2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm10[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm9, %zmm30
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,5,13,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm19, %xmm9
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm17, %xmm10
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <5,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm20, %xmm11
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm18, %xmm12
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm9, %ymm27, %ymm11
-; AVX512BW-SLOW-NEXT:    vpblendd $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm9 # 32-byte Folded Reload
-; AVX512BW-SLOW-NEXT:    # ymm9 = ymm11[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm22 = <u,u,u,u,u,u,5,13,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm5, %xmm11
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm16, %xmm12
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm23
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm22 = <u,u,u,u,5,13,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm14, %xmm12
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm15, %xmm29
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm29[0],xmm12[0],xmm29[1],xmm12[1],xmm29[2],xmm12[2],xmm29[3],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm11 = xmm12[0,1,2],xmm11[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $40, %zmm25, %zmm12
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm12, %xmm12
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm11 = xmm12[0,1],xmm11[2,3]
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm11, %zmm0, %zmm11
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm11 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm9
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm1, %xmm9
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm10, %xmm2, %xmm10
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm0, %ymm27, %ymm9
-; AVX512BW-SLOW-NEXT:    vpblendd $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9, %ymm0 # 32-byte Folded Reload
-; AVX512BW-SLOW-NEXT:    # ymm0 = ymm9[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm23, %xmm13, %xmm9
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm23, %xmm6, %xmm10
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm8, %xmm10
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm21, %xmm12
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm10[0,1,2],xmm9[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $40, %zmm7, %zmm10
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm10, %xmm10
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm9 = xmm10[0,1],xmm9[2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm9[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm11, %zmm29
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,6,14,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm19, %xmm10
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm17, %xmm11
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm20, %xmm12
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm18, %xmm23
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm23[0],xmm12[0],xmm23[1],xmm12[1],xmm23[2],xmm12[2],xmm23[3],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm10, %ymm27, %ymm12
-; AVX512BW-SLOW-NEXT:    vpblendd $192, (%rsp), %ymm12, %ymm10 # 32-byte Folded Reload
-; AVX512BW-SLOW-NEXT:    # ymm10 = ymm12[0,1,2,3,4,5],mem[6,7]
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,6,14,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm12
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm16, %xmm23
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm22
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm23[0],xmm12[0],xmm23[1],xmm12[1],xmm23[2],xmm12[2],xmm23[3],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm3, %xmm23
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm4, %xmm9
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm23[0],xmm9[1],xmm23[1],xmm9[2],xmm23[2],xmm9[3],xmm23[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm1, %xmm23
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm2, %xmm11
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm11[0],xmm23[0],xmm11[1],xmm23[1],xmm11[2],xmm23[2],xmm11[3],xmm23[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm9, %ymm27, %ymm11
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,7,15,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm19, %xmm19
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm17, %xmm17
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm17 = xmm17[0],xmm19[0],xmm17[1],xmm19[1],xmm17[2],xmm19[2],xmm17[3],xmm19[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm19, %xmm20, %xmm20
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm19, %xmm18, %xmm18
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm18[0],xmm20[0],xmm18[1],xmm20[1],xmm18[2],xmm20[2],xmm18[3],xmm20[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm17, %ymm27, %ymm0
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm9, %xmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm19, %xmm1, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm19, %xmm2, %xmm2
+; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
+; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm15, %xmm2
 ; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpermt2d %ymm3, %ymm27, %ymm1
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm17 = <u,u,u,u,6,14,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm14, %xmm2
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm15, %xmm3
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,u,6,14,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm23 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm23, %xmm3
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm19, %xmm5
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5,6],ymm1[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,6,14,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm8, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm7, %xmm6
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <6,14,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm31, %xmm26
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm18, %xmm30
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm31
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm26 = xmm30[0],xmm26[0],xmm30[1],xmm26[1],xmm30[2],xmm26[2],xmm30[3],xmm26[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $1, %xmm26, %ymm0, %ymm4
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4],ymm5[5],ymm4[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4,5],ymm1[6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm30 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm30, %xmm4
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm10, %xmm5
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm11, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm12, %xmm26
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm26[0],xmm5[0],xmm26[1],xmm5[1],xmm26[2],xmm5[2],xmm26[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vpsrlq $48, %zmm13, %zmm5
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm5, %xmm5
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,3]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm0, %zmm4
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm4 {%k1}
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm17, %xmm1
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm28, %xmm5
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm22, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm24, %xmm26
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm26[0],xmm5[0],xmm26[1],xmm5[1],xmm26[2],xmm5[2],xmm26[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3,4,5,6],ymm1[7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm25, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm9, %xmm3
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm18
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm29, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm27, %xmm6
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3,4],ymm3[5],ymm5[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm16, %xmm3
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm0, %xmm20, %xmm0
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm21, %xmm3
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{[-0-9]+}}(%r{{[sb]}}p), %xmm26 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm26, %xmm2
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[3]
+; AVX512BW-SLOW-NEXT:    vpsrlq $48, %zmm14, %zmm2
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm2, %xmm2
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm4, %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,u,u,u,7,15,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm1, %xmm15, %xmm3
 ; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm2[0,1,2],xmm12[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $48, %zmm25, %zmm3
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,7,15,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm23, %xmm4
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm19, %xmm5
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5,6],ymm2[7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,7,15,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm8, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm7, %xmm6
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <7,15,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm7, %xmm7
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm31, %xmm9
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm7[0,1,2,3,4],ymm5[5],ymm7[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3,4,5],ymm2[6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm1, %xmm30, %xmm5
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm1, %xmm10, %xmm7
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm11, %xmm7
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm12, %xmm8
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm7[0,1,2],xmm5[3]
+; AVX512BW-SLOW-NEXT:    vpsrlq $56, %zmm13, %zmm7
+; AVX512BW-SLOW-NEXT:    vpmovqb %zmm7, %xmm7
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm5 = xmm7[0,1],xmm5[2,3]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm5, %zmm0, %zmm5
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm5 {%k1}
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm1, %xmm17, %xmm2
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm1, %xmm28, %xmm7
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm22, %xmm7
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm24, %xmm8
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3,4,5,6],ymm2[7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm25, %xmm7
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm4, %xmm18, %xmm4
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm29, %xmm7
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm27, %xmm6
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4],ymm4[5],ymm6[6,7]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm2[6,7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm1, %xmm16, %xmm4
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm1, %xmm20, %xmm1
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm21, %xmm4
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm26, %xmm3
+; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[3]
+; AVX512BW-SLOW-NEXT:    vpsrlq $56, %zmm14, %zmm3
 ; AVX512BW-SLOW-NEXT:    vpmovqb %zmm3, %xmm3
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3]
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm2
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm2 {%k1}
-; AVX512BW-SLOW-NEXT:    vpblendd $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11, %ymm3 # 32-byte Folded Reload
-; AVX512BW-SLOW-NEXT:    # ymm3 = ymm11[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm13, %xmm4
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm6, %xmm10
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm22, %xmm6, %xmm6
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; AVX512BW-SLOW-NEXT:    vmovdqa %xmm8, %xmm11
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm8, %xmm6
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm17, %xmm21, %xmm8
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm6[0,1,2],xmm4[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $48, %zmm7, %zmm6
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm6, %xmm6
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $0, %ymm3, %zmm2, %zmm2
-; AVX512BW-SLOW-NEXT:    vpblendd $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512BW-SLOW-NEXT:    # ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm31, %xmm5, %xmm3
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm31, %xmm16, %xmm4
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm14, %xmm4
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm15, %xmm6
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm4[0,1,2],xmm3[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $56, %zmm25, %zmm4
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm3 = xmm4[0,1],xmm3[2,3]
-; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm0, %zmm3
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm3 {%k1}
-; AVX512BW-SLOW-NEXT:    vpblendd $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
-; AVX512BW-SLOW-NEXT:    # ymm0 = ymm1[0,1,2,3,4,5],mem[6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm31, %xmm13, %xmm1
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm31, %xmm10, %xmm4
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm11, %xmm4
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm26, %xmm21, %xmm6
-; AVX512BW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[3]
-; AVX512BW-SLOW-NEXT:    vpsrlq $56, %zmm7, %zmm4
-; AVX512BW-SLOW-NEXT:    vpmovqb %zmm4, %xmm4
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm4[0,1],xmm1[2,3]
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $0, %ymm0, %zmm3, %zmm0
-; AVX512BW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-SLOW-NEXT:    vmovaps %zmm1, (%rsi)
-; AVX512BW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-SLOW-NEXT:    vmovaps %zmm1, (%rdx)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm28, (%rcx)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm24, (%r8)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm30, (%r9)
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $0, %ymm1, %zmm5, %zmm1
+; AVX512BW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-SLOW-NEXT:    vmovaps %zmm2, (%rsi)
+; AVX512BW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-SLOW-NEXT:    vmovaps %zmm2, (%rdx)
+; AVX512BW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-SLOW-NEXT:    vmovaps %zmm2, (%rcx)
+; AVX512BW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-SLOW-NEXT:    vmovaps %zmm2, (%r8)
+; AVX512BW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-SLOW-NEXT:    vmovaps %zmm2, (%r9)
 ; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm29, (%rax)
-; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512BW-SLOW-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512BW-SLOW-NEXT:    vmovaps %zmm2, (%rax)
 ; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512BW-SLOW-NEXT:    addq $456, %rsp # imm = 0x1C8
+; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm1, (%rax)
+; AVX512BW-SLOW-NEXT:    addq $744, %rsp # imm = 0x2E8
 ; AVX512BW-SLOW-NEXT:    vzeroupper
 ; AVX512BW-SLOW-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
index 23b04d842c545..eb0ef5caaa0a1 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
@@ -121,10 +121,10 @@ define void @store_i16_stride3_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-ONLY-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-ONLY-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
 ; AVX2-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm2
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,1,8,9],zero,zero,ymm0[2,3,10,11],zero,zero,ymm0[4,5,12,13,20,21],zero,zero,zero,zero,ymm0[22,23],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,ymm0[0,1],zero,zero,zero,zero,ymm0[2,3],zero,zero,zero,zero,zero,zero,ymm0[22,23,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,1,8,9],zero,zero,ymm2[2,3,10,11],zero,zero,ymm2[4,5,12,13,20,21],zero,zero,zero,zero,ymm2[22,23],zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX2-ONLY-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-ONLY-NEXT:    vmovq %xmm1, 16(%rcx)
@@ -138,10 +138,10 @@ define void @store_i16_stride3_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
 ; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm2
-; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,1,8,9],zero,zero,ymm0[2,3,10,11],zero,zero,ymm0[4,5,12,13,20,21],zero,zero,zero,zero,ymm0[22,23,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,ymm0[0,1],zero,zero,zero,zero,ymm0[2,3],zero,zero,zero,zero,zero,zero,ymm0[22,23,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,1,8,9],zero,zero,ymm2[2,3,10,11],zero,zero,ymm2[4,5,12,13,20,21],zero,zero,zero,zero,ymm2[22,23,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm1, 16(%rcx)
@@ -155,11 +155,12 @@ define void @store_i16_stride3_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX512BW-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,4,16,1,5,17,2,6,18,3,7,19,u,u,u,u>
-; AVX512BW-NEXT:    vpermi2w %ymm0, %ymm1, %ymm2
-; AVX512BW-NEXT:    vextracti128 $1, %ymm2, %xmm0
-; AVX512BW-NEXT:    vmovq %xmm0, 16(%rcx)
-; AVX512BW-NEXT:    vmovdqa %xmm2, (%rcx)
+; AVX512BW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,4,8,1,5,9,2,6,10,3,7,11,u,u,u,u>
+; AVX512BW-NEXT:    vpermw %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512BW-NEXT:    vmovq %xmm1, 16(%rcx)
+; AVX512BW-NEXT:    vmovdqa %xmm0, (%rcx)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %in.vec0 = load <4 x i16>, ptr %in.vecptr0, align 64
@@ -1206,6 +1207,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
 ; AVX512F-NEXT:    vpshufb %xmm2, %xmm6, %xmm6
 ; AVX512F-NEXT:    vinserti128 $1, %xmm4, %ymm6, %ymm4
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
 ; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm7 = [4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
 ; AVX512F-NEXT:    vpshufb %xmm7, %xmm6, %xmm6
@@ -1213,7 +1215,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2]
 ; AVX512F-NEXT:    vpblendw {{.*#+}} xmm3 = xmm5[0,1],xmm3[2],xmm5[3,4],xmm3[5],xmm5[6,7]
 ; AVX512F-NEXT:    vinserti128 $1, %xmm6, %ymm3, %ymm3
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm4[0,1,2,3]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm4[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <5,5,u,6,6,u,7,7,u,8,8,u,9,9,u,10>
 ; AVX512F-NEXT:    vpermd (%rdx), %zmm4, %zmm4
 ; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
@@ -1232,7 +1234,8 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,2]
 ; AVX512F-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0,1],xmm9[2],xmm6[3,4],xmm9[5],xmm6[6,7]
 ; AVX512F-NEXT:    vinserti128 $1, %xmm7, %ymm6, %ymm6
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm6[0,1,2,3]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm6
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm6[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqa (%rdx), %ymm6
 ; AVX512F-NEXT:    vmovdqa 32(%rdx), %ymm7
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = <5,5,u,6,6,u,7,7>
@@ -1248,13 +1251,14 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovdqa (%rsi), %ymm5
 ; AVX512F-NEXT:    vpshufb %ymm8, %ymm5, %ymm5
 ; AVX512F-NEXT:    vpor %ymm3, %ymm5, %ymm3
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
 ; AVX512F-NEXT:    vprold $16, %xmm0, %xmm5
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm8 = xmm1[1,1,2,2]
 ; AVX512F-NEXT:    vpblendw {{.*#+}} xmm5 = xmm8[0,1],xmm5[2],xmm8[3,4],xmm5[5],xmm8[6,7]
 ; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; AVX512F-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
 ; AVX512F-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm0
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm3[0,1,2,3]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm3[4,5,6,7]
 ; AVX512F-NEXT:    vpshufb %ymm10, %ymm6, %ymm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,0,0,u,1,1,u,2>
 ; AVX512F-NEXT:    vpermd %ymm6, %ymm2, %ymm2
@@ -2333,7 +2337,8 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovdqa (%rsi), %ymm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = [10,11,0,1,128,128,12,13,2,3,128,128,14,15,4,5,128,128,16,17,28,29,128,128,18,19,18,19,128,128,20,21]
 ; AVX512F-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512F-NEXT:    vpor %ymm0, %ymm1, %ymm3
+; AVX512F-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm3
 ; AVX512F-NEXT:    vmovdqa (%rsi), %xmm5
 ; AVX512F-NEXT:    vmovdqa64 16(%rsi), %xmm24
 ; AVX512F-NEXT:    vmovdqa 32(%rsi), %xmm6
@@ -2347,7 +2352,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
 ; AVX512F-NEXT:    vpshufb %xmm0, %xmm9, %xmm9
 ; AVX512F-NEXT:    vinserti128 $1, %xmm8, %ymm9, %ymm8
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm8[0,1,2,3],zmm3[0,1,2,3]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm8[0,1,2,3],zmm3[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqa (%rdx), %ymm3
 ; AVX512F-NEXT:    vmovdqa 32(%rdx), %ymm8
 ; AVX512F-NEXT:    vmovdqa 64(%rdx), %ymm14
@@ -2369,6 +2374,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vpshufb %xmm0, %xmm10, %xmm10
 ; AVX512F-NEXT:    vmovdqa64 %xmm0, %xmm26
 ; AVX512F-NEXT:    vinserti128 $1, %xmm11, %ymm10, %ymm10
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm10
 ; AVX512F-NEXT:    vmovdqa 80(%rdi), %xmm12
 ; AVX512F-NEXT:    vmovdqa 80(%rsi), %xmm13
 ; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm15 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
@@ -2378,7 +2384,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[1,1,2,2]
 ; AVX512F-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0,1],xmm13[2],xmm12[3,4],xmm13[5],xmm12[6,7]
 ; AVX512F-NEXT:    vinserti128 $1, %xmm15, %ymm12, %ymm12
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm15 = zmm12[0,1,2,3],zmm10[0,1,2,3]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm15 = zmm12[0,1,2,3],zmm10[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm20 = <5,5,u,6,6,u,7,7,u,8,8,u,9,9,u,10>
 ; AVX512F-NEXT:    vpermd 64(%rdx), %zmm20, %zmm10
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
@@ -2397,7 +2403,8 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[1,1,2,2]
 ; AVX512F-NEXT:    vpblendw {{.*#+}} xmm13 = xmm15[0,1],xmm13[2],xmm15[3,4],xmm13[5],xmm15[6,7]
 ; AVX512F-NEXT:    vinserti128 $1, %xmm5, %ymm13, %ymm5
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm12[0,1,2,3],zmm5[0,1,2,3]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm12[0,1,2,3],zmm5[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqa 96(%rdx), %ymm12
 ; AVX512F-NEXT:    vmovdqa64 {{.*#+}} ymm23 = <5,5,u,6,6,u,7,7>
 ; AVX512F-NEXT:    vpermd %ymm12, %ymm23, %ymm15
@@ -2412,6 +2419,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovdqa 64(%rsi), %ymm15
 ; AVX512F-NEXT:    vpshufb %ymm2, %ymm15, %ymm15
 ; AVX512F-NEXT:    vpor %ymm5, %ymm15, %ymm5
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
 ; AVX512F-NEXT:    vmovdqa 64(%rsi), %xmm15
 ; AVX512F-NEXT:    vprold $16, %xmm15, %xmm0
 ; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm13
@@ -2421,7 +2429,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vmovdqa64 %xmm26, %xmm15
 ; AVX512F-NEXT:    vpshufb %xmm15, %xmm7, %xmm7
 ; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm7, %ymm0
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm5[0,1,2,3]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm5[4,5,6,7]
 ; AVX512F-NEXT:    vpshufb %ymm9, %ymm14, %ymm5
 ; AVX512F-NEXT:    vpermd %ymm14, %ymm19, %ymm7
 ; AVX512F-NEXT:    vpandnq %ymm7, %ymm16, %ymm7
@@ -2440,7 +2448,8 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,2,2]
 ; AVX512F-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0,1],xmm13[2],xmm7[3,4],xmm13[5],xmm7[6,7]
 ; AVX512F-NEXT:    vinserti128 $1, %xmm14, %ymm7, %ymm7
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm7[0,1,2,3]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm7[4,5,6,7]
 ; AVX512F-NEXT:    vpermd %ymm8, %ymm23, %ymm7
 ; AVX512F-NEXT:    vpandnq %ymm7, %ymm22, %ymm7
 ; AVX512F-NEXT:    vpshufb %ymm9, %ymm8, %ymm8
@@ -2452,6 +2461,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
 ; AVX512F-NEXT:    vpshufb %xmm15, %xmm2, %xmm2
 ; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512F-NEXT:    vmovdqa64 %xmm24, %xmm1
 ; AVX512F-NEXT:    vmovdqa64 %xmm25, %xmm6
 ; AVX512F-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
@@ -2460,7 +2470,7 @@ define void @store_i16_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vpshufd {{.*#+}} xmm1 = xmm25[1,1,2,2]
 ; AVX512F-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2],xmm1[3,4],xmm4[5],xmm1[6,7]
 ; AVX512F-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[0,1,2,3]
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512F-NEXT:    vpermd (%rdx), %zmm20, %zmm1
 ; AVX512F-NEXT:    vpternlogq $184, %zmm0, %zmm21, %zmm1
 ; AVX512F-NEXT:    vmovdqa64 %zmm1, 64(%rcx)

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
index 7d1ce5b3c55fa..bc25bb39f9691 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-4.ll
@@ -118,10 +118,10 @@ define void @store_i16_stride4_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-SLOW-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-SLOW-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
 ; AVX2-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm2
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,1,8,9,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,22,23,30,31]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,0,1,8,9,u,u,u,u,2,3,10,11,20,21,28,29,u,u,u,u,22,23,30,31,u,u,u,u]
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,1,8,9,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,22,23,30,31]
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm0, (%r8)
 ; AVX2-SLOW-NEXT:    vzeroupper
@@ -151,10 +151,10 @@ define void @store_i16_stride4_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-PERLANE-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-FAST-PERLANE-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
 ; AVX2-FAST-PERLANE-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,1,8,9,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,22,23,30,31]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,0,1,8,9,u,u,u,u,2,3,10,11,20,21,28,29,u,u,u,u,22,23,30,31,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,1,8,9,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,22,23,30,31]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6],ymm1[7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, (%r8)
 ; AVX2-FAST-PERLANE-NEXT:    vzeroupper
@@ -168,10 +168,10 @@ define void @store_i16_stride4_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX512F-SLOW-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
 ; AVX512F-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm2
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,1,8,9,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,22,23,30,31]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,0,1,8,9,u,u,u,u,2,3,10,11,20,21,28,29,u,u,u,u,22,23,30,31,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,1,8,9,u,u,u,u,2,3,10,11,u,u,u,u,u,u,u,u,20,21,28,29,u,u,u,u,22,23,30,31]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6],ymm1[7]
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, (%r8)
 ; AVX512F-SLOW-NEXT:    vzeroupper
@@ -185,9 +185,10 @@ define void @store_i16_stride4_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX512F-FAST-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
 ; AVX512F-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,2,8,10,1,3,9,11]
-; AVX512F-FAST-NEXT:    vpermi2d %ymm1, %ymm0, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[0,1,4,5,8,9,12,13,2,3,6,7,10,11,14,15,16,17,20,21,24,25,28,29,18,19,22,23,26,27,30,31]
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,2,4,6,1,3,5,7]
+; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm1, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,2,3,6,7,10,11,14,15,16,17,20,21,24,25,28,29,18,19,22,23,26,27,30,31]
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm0, (%r8)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
@@ -200,9 +201,10 @@ define void @store_i16_stride4_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX512BW-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,4,16,20,1,5,17,21,2,6,18,22,3,7,19,23]
-; AVX512BW-NEXT:    vpermi2w %ymm1, %ymm0, %ymm2
-; AVX512BW-NEXT:    vmovdqa %ymm2, (%r8)
+; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15]
+; AVX512BW-NEXT:    vpermw %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT:    vmovdqa %ymm0, (%r8)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %in.vec0 = load <4 x i16>, ptr %in.vecptr0, align 64
@@ -313,11 +315,12 @@ define void @store_i16_stride4_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW:       # %bb.0:
 ; AVX512BW-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-NEXT:    vmovdqa (%rdx), %xmm1
-; AVX512BW-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
 ; AVX512BW-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,8,32,40,1,9,33,41,2,10,34,42,3,11,35,43,4,12,36,44,5,13,37,45,6,14,38,46,7,15,39,47]
-; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, (%r8)
+; AVX512BW-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,8,16,24,1,9,17,25,2,10,18,26,3,11,19,27,4,12,20,28,5,13,21,29,6,14,22,30,7,15,23,31]
+; AVX512BW-NEXT:    vpermw %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%r8)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %in.vec0 = load <8 x i16>, ptr %in.vecptr0, align 64

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
index 367bf1945eec4..5ef699f087c32 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
@@ -384,11 +384,12 @@ define void @store_i16_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,4,8,12,32,1,5,9,13,33,2,6,10,14,34,3,7,11,15,35,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm2
-; AVX512BW-NEXT:    vextracti32x4 $2, %zmm2, %xmm0
-; AVX512BW-NEXT:    vmovq %xmm0, 32(%r9)
-; AVX512BW-NEXT:    vmovdqa %ymm2, (%r9)
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,4,8,12,16,1,5,9,13,17,2,6,10,14,18,3,7,11,15,19,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vpermw %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
+; AVX512BW-NEXT:    vmovq %xmm1, 32(%r9)
+; AVX512BW-NEXT:    vmovdqa %ymm0, (%r9)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %in.vec0 = load <4 x i16>, ptr %in.vecptr0, align 64
@@ -1383,29 +1384,30 @@ define void @store_i16_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0],xmm8[1],xmm10[2],xmm8[3],xmm10[4,5],xmm8[6],xmm10[7]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,0]
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm8
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm7
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm5
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm8
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,2,1,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,5,6]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm5, %ymm7, %ymm6
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm8[0,1,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm7, %ymm8, %ymm6
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm5[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpbroadcastq (%r8), %ymm6
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm4[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm6
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm4[0,1,1,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,14,15,u,u,u,u,u,u,u,u,16,17,u,u,u,u,u,u,u,u,18,19,u,u]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm0[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm8[0],ymm5[1],ymm8[2,3],ymm5[4],ymm8[5],ymm5[6],ymm8[7,8],ymm5[9],ymm8[10,11],ymm5[12],ymm8[13],ymm5[14],ymm8[15]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm3[u,u,12,13,u,u,u,u,u,u,u,u,14,15,u,u,u,u,u,u,u,u,16,17,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm0[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm7[0],ymm5[1],ymm7[2,3],ymm5[4],ymm7[5],ymm5[6],ymm7[7,8],ymm5[9],ymm7[10,11],ymm5[12],ymm7[13],ymm5[14],ymm7[15]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm3[u,u,12,13,u,u,u,u,u,u,u,u,14,15,u,u,u,u,u,u,u,u,16,17,u,u,u,u,u,u,u,u]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm2[3,0,3,0,7,4,7,4]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0],ymm8[1],ymm9[2],ymm8[3],ymm9[4,5],ymm8[6],ymm9[7,8],ymm8[9],ymm9[10],ymm8[11],ymm9[12,13],ymm8[14],ymm9[15]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm9[0],ymm7[1],ymm9[2],ymm7[3],ymm9[4,5],ymm7[6],ymm9[7,8],ymm7[9],ymm9[10],ymm7[11],ymm9[12,13],ymm7[14],ymm9[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm5, %ymm9, %ymm8
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm5, %ymm9, %ymm7
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm0[0,1,2,1,4,5,6,5]
 ; AVX512F-SLOW-NEXT:    vprolq $16, %ymm1, %ymm10
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm10[0,1],ymm5[2],ymm10[3],ymm5[4],ymm10[5,6],ymm5[7],ymm10[8,9],ymm5[10],ymm10[11],ymm5[12],ymm10[13,14],ymm5[15]
@@ -1414,8 +1416,9 @@ define void @store_i16_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm11[0],ymm10[1],ymm11[2,3],ymm10[4],ymm11[5],ymm10[6],ymm11[7,8],ymm10[9],ymm11[10,11],ymm10[12],ymm11[13],ymm10[14],ymm11[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm5, %ymm7, %ymm10
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm10[0,1,2,3]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm5, %ymm8, %ymm10
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm5
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm7[0,1,2,3],zmm5[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 16(%r8), %ymm7
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
 ; AVX512F-SLOW-NEXT:    vpandn %ymm7, %ymm8, %ymm7
@@ -1460,28 +1463,29 @@ define void @store_i16_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm10[0],xmm8[1],xmm10[2],xmm8[3],xmm10[4,5],xmm8[6],xmm10[7]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,0]
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm8
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm7
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm5
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm8
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm5, %ymm7, %ymm6
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm8[0,1,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm7, %ymm8, %ymm6
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm5[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpbroadcastq (%r8), %ymm6
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm4[0,1,1,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm6
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm4[0,1,1,1]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,14,15,u,u,u,u,u,u,u,u,16,17,u,u,u,u,u,u,u,u,18,19,u,u]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm0[1,1,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm8[0],ymm5[1],ymm8[2,3],ymm5[4],ymm8[5],ymm5[6],ymm8[7,8],ymm5[9],ymm8[10,11],ymm5[12],ymm8[13],ymm5[14],ymm8[15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm3[u,u,12,13,u,u,u,u,u,u,u,u,14,15,u,u,u,u,u,u,u,u,16,17,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm0[1,1,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm7[0],ymm5[1],ymm7[2,3],ymm5[4],ymm7[5],ymm5[6],ymm7[7,8],ymm5[9],ymm7[10,11],ymm5[12],ymm7[13],ymm5[14],ymm7[15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm3[u,u,12,13,u,u,u,u,u,u,u,u,14,15,u,u,u,u,u,u,u,u,16,17,u,u,u,u,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm2[3,0,3,0,7,4,7,4]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0],ymm8[1],ymm9[2],ymm8[3],ymm9[4,5],ymm8[6],ymm9[7,8],ymm8[9],ymm9[10],ymm8[11],ymm9[12,13],ymm8[14],ymm9[15]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm9[0],ymm7[1],ymm9[2],ymm7[3],ymm9[4,5],ymm7[6],ymm9[7,8],ymm7[9],ymm9[10],ymm7[11],ymm9[12,13],ymm7[14],ymm9[15]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm5, %ymm9, %ymm8
+; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm5, %ymm9, %ymm7
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm0[0,1,2,1,4,5,6,5]
 ; AVX512F-FAST-NEXT:    vprolq $16, %ymm1, %ymm10
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm10[0,1],ymm5[2],ymm10[3],ymm5[4],ymm10[5,6],ymm5[7],ymm10[8,9],ymm5[10],ymm10[11],ymm5[12],ymm10[13,14],ymm5[15]
@@ -1490,8 +1494,9 @@ define void @store_i16_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm11[0],ymm10[1],ymm11[2,3],ymm10[4],ymm11[5],ymm10[6],ymm11[7,8],ymm10[9],ymm11[10,11],ymm10[12],ymm11[13],ymm10[14],ymm11[15]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm5, %ymm7, %ymm10
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm10[0,1,2,3]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm5, %ymm8, %ymm10
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm5
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm7[0,1,2,3],zmm5[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpbroadcastq 16(%r8), %ymm7
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
 ; AVX512F-FAST-NEXT:    vpandn %ymm7, %ymm8, %ymm7
@@ -2762,313 +2767,321 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-SLOW-LABEL: store_i16_stride5_vf32:
 ; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm1
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm1[3,2,3,3,7,6,7,7]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm2
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm2[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdx), %ymm18
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm4
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[3,2,3,3,7,6,7,7]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm1
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm1[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2],ymm0[3,4],ymm3[5,6,7,8],ymm0[9],ymm3[10],ymm0[11,12],ymm3[13,14,15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm0[2,2,3,2]
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm9
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm9[0],xmm3[0],xmm9[1],xmm3[1],xmm9[2],xmm3[2],xmm9[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm0[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm6
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm6[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm7
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm7[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,3,2,2,6,7,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm4[0],ymm0[1],ymm4[2],ymm0[3],ymm4[4,5],ymm0[6],ymm4[7,8],ymm0[9],ymm4[10],ymm0[11],ymm4[12,13],ymm0[14],ymm4[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm21 = ymm0[2,3,2,2]
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,2,1,3]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,5,6]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm4[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm15
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm15, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 32(%rdi), %ymm19
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm19[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm8[0],ymm4[1],ymm8[2,3],ymm4[4],ymm8[5],ymm4[6],ymm8[7,8],ymm4[9],ymm8[10,11],ymm4[12],ymm8[13],ymm4[14],ymm8[15]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpbroadcastq 40(%rdi), %xmm8
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm8[1],xmm0[2,3],xmm8[4],xmm0[5],xmm8[6],xmm0[7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm23
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm0, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm25
-; AVX512F-SLOW-NEXT:    vmovdqa64 32(%rdx), %ymm20
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm20[3,0,3,0,7,4,7,4]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm14[0],ymm4[1],ymm14[2],ymm4[3],ymm14[4,5],ymm4[6],ymm14[7,8],ymm4[9],ymm14[10],ymm4[11],ymm14[12,13],ymm4[14],ymm14[15]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,2,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm9[0],xmm3[1],xmm9[2],xmm3[3],xmm9[4,5],xmm3[6],xmm9[7]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm5
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,0]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm8
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm3, %zmm9
-; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm8, %xmm3
-; AVX512F-SLOW-NEXT:    vpbroadcastq 8(%rdi), %xmm12
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm12[1],xmm3[2,3],xmm12[4],xmm3[5],xmm12[6],xmm3[7]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm12
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm12[0],xmm8[0],xmm12[1],xmm8[1],xmm12[2],xmm8[2],xmm12[3],xmm8[3]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm14
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3,4],ymm3[5,6,7,8],ymm2[9],ymm3[10],ymm2[11,12],ymm3[13,14,15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,3,2]
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm9
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm12
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm10
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm22
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm3
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm3[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm5
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm5[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,3,2,2,6,7,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0],ymm6[1],ymm7[2],ymm6[3],ymm7[4,5],ymm6[6],ymm7[7,8],ymm6[9],ymm7[10],ymm6[11],ymm7[12,13],ymm6[14],ymm7[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,2]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm15
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm11
+; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %xmm24
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm7
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[0,2,1,3]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,5,6]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm16, %zmm6
+; AVX512F-SLOW-NEXT:    vpbroadcastq 24(%r8), %ymm2
+; AVX512F-SLOW-NEXT:    vpbroadcastq 32(%r8), %ymm7
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm2, %zmm2
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm2
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm13
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm13, %ymm6
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm20
+; AVX512F-SLOW-NEXT:    vmovdqa64 32(%rdi), %ymm17
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm17[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0],ymm6[1],ymm7[2,3],ymm6[4],ymm7[5],ymm6[6],ymm7[7,8],ymm6[9],ymm7[10,11],ymm6[12],ymm7[13],ymm6[14],ymm7[15]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm11, %xmm11
+; AVX512F-SLOW-NEXT:    vpbroadcastq 40(%rdi), %xmm14
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm11 = xmm11[0],xmm14[1],xmm11[2,3],xmm14[4],xmm11[5],xmm14[6],xmm11[7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm11, %zmm11
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm6
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm14
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm21
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm4[3,0,3,0,7,4,7,4]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm14[1],ymm8[2],ymm14[3],ymm8[4,5],ymm14[6],ymm8[7,8],ymm14[9],ymm8[10],ymm14[11],ymm8[12,13],ymm14[14],ymm8[15]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm10, %xmm10
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[1,2,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1],xmm10[2],xmm9[3],xmm10[4,5],xmm9[6],xmm10[7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,0]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm8
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm8
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm9
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm10
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm10, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm23
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,1,1,1]
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-SLOW-NEXT:    vpandnq %ymm10, %ymm19, %ymm10
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm10, %zmm10
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm10
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm0
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm11
+; AVX512F-SLOW-NEXT:    vpshufb %xmm11, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm12, %xmm12
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,2,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm12[0],xmm0[1],xmm12[2],xmm0[3],xmm12[4,5],xmm0[6],xmm12[7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,0]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm15, %xmm7
+; AVX512F-SLOW-NEXT:    vpbroadcastq 8(%rdi), %xmm8
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm7[0],xmm8[1],xmm7[2,3],xmm8[4],xmm7[5],xmm8[6],xmm7[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm8
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm15[0],xmm8[1],xmm15[1],xmm8[2],xmm15[2],xmm8[3],xmm15[3]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,2,1,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,4,5,6]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,0,1],zmm3[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm12
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm12[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm12, %ymm12
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpandnq %ymm18, %ymm24, %ymm18
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm18, %zmm12
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm14[0],xmm5[0],xmm14[1],xmm5[1],xmm14[2],xmm5[2],xmm14[3],xmm5[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm10[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm14, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,2,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2],xmm5[3],xmm4[4,5],xmm5[6],xmm4[7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm4[0,1,0,0]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm19[0,1,2,1,4,5,6,5]
-; AVX512F-SLOW-NEXT:    vprolq $16, %ymm15, %ymm5
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3],ymm4[4],ymm5[5,6],ymm4[7],ymm5[8,9],ymm4[10],ymm5[11],ymm4[12],ymm5[13,14],ymm4[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm19[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm15[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[2,3,2,2,6,7,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm10[0],ymm4[1],ymm10[2],ymm4[3],ymm10[4,5],ymm4[6],ymm10[7,8],ymm4[9],ymm10[10],ymm4[11],ymm10[12,13],ymm4[14],ymm10[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm4[2,3,2,2]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm0, %ymm14
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm20[1,1,1,2,5,5,5,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm14[0],ymm15[1],ymm14[2,3],ymm15[4],ymm14[5],ymm15[6],ymm14[7,8],ymm15[9],ymm14[10,11],ymm15[12],ymm14[13],ymm15[14],ymm14[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm20[3,2,3,3,7,6,7,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm15[1],ymm0[2],ymm15[3,4],ymm0[5,6,7,8],ymm15[9],ymm0[10],ymm15[11,12],ymm0[13,14,15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,3,2]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm7, %ymm15
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm6[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0],ymm15[1],ymm11[2,3],ymm15[4],ymm11[5],ymm15[6],ymm11[7,8],ymm15[9],ymm11[10,11],ymm15[12],ymm11[13],ymm15[14],ymm11[15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,1,2,1,4,5,6,5]
-; AVX512F-SLOW-NEXT:    vprolq $16, %ymm7, %ymm7
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7],ymm7[8,9],ymm6[10],ymm7[11],ymm6[12],ymm7[13,14],ymm6[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm11, %zmm6
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm7
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm2, %ymm7
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm1[3,0,3,0,7,4,7,4]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm11[0],ymm7[1],ymm11[2],ymm7[3],ymm11[4,5],ymm7[6],ymm11[7,8],ymm7[9],ymm11[10],ymm7[11],ymm11[12,13],ymm7[14],ymm11[15]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm2, %ymm2
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[1,1,1,2,5,5,5,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm7, %zmm8, %zmm7
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm8, %zmm7
+; AVX512F-SLOW-NEXT:    vpbroadcastq (%r8), %ymm0
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm9[0,1,1,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm17[0,1,2,1,4,5,6,5]
+; AVX512F-SLOW-NEXT:    vprolq $16, %ymm13, %ymm12
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm12[0,1],ymm7[2],ymm12[3],ymm7[4],ymm12[5,6],ymm7[7],ymm12[8,9],ymm7[10],ymm12[11],ymm7[12],ymm12[13,14],ymm7[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm17[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm13[2,3,2,2,6,7,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm13[0],ymm12[1],ymm13[2],ymm12[3],ymm13[4,5],ymm12[6],ymm13[7,8],ymm12[9],ymm13[10],ymm12[11],ymm13[12,13],ymm12[14],ymm13[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,2]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm7, %zmm12
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm6, %ymm13
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm4[1,1,1,2,5,5,5,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm14[1],ymm13[2,3],ymm14[4],ymm13[5],ymm14[6],ymm13[7,8],ymm14[9],ymm13[10,11],ymm14[12],ymm13[13],ymm14[14],ymm13[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[3,2,3,3,7,6,7,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm6[0],ymm4[1],ymm6[2],ymm4[3,4],ymm6[5,6,7,8],ymm4[9],ymm6[10],ymm4[11,12],ymm6[13,14,15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,3,2]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm13, %zmm4
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm12, %zmm8, %zmm4
+; AVX512F-SLOW-NEXT:    vpbroadcastq 48(%r8), %ymm6
+; AVX512F-SLOW-NEXT:    vpbroadcastq 56(%r8), %ymm8
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm6
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm6
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm4
+; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm5, %ymm4
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm3[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm8[0],ymm4[1],ymm8[2,3],ymm4[4],ymm8[5],ymm4[6],ymm8[7,8],ymm4[9],ymm8[10,11],ymm4[12],ymm8[13],ymm4[14],ymm8[15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,1,2,1,4,5,6,5]
+; AVX512F-SLOW-NEXT:    vprolq $16, %ymm5, %ymm5
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0,1],ymm3[2],ymm5[3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8,9],ymm3[10],ymm5[11],ymm3[12],ymm5[13,14],ymm3[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm4
+; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm1, %ymm4
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm18[3,0,3,0,7,4,7,4]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7,8],ymm4[9],ymm5[10],ymm4[11],ymm5[12,13],ymm4[14],ymm5[15]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm1, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm18[1,1,1,2,5,5,5,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm5[1],ymm1[2,3],ymm5[4],ymm1[5],ymm5[6],ymm1[7,8],ymm5[9],ymm1[10,11],ymm5[12],ymm1[13],ymm5[14],ymm1[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm7, %zmm1
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm2
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm2, %ymm2
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm1
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm16, %zmm1
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 16(%r8), %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpandn %ymm3, %ymm7, %ymm3
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm16, %zmm3
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm21, %zmm7
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm11, %zmm7
-; AVX512F-SLOW-NEXT:    vpbroadcastq 24(%r8), %ymm3
-; AVX512F-SLOW-NEXT:    vpbroadcastq 32(%r8), %ymm15
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm3, %zmm3
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm3
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm23, %zmm9
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm12
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm18, %zmm13, %zmm7
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm8, %zmm9, %zmm7
-; AVX512F-SLOW-NEXT:    vpbroadcastq (%r8), %ymm8
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm8, %zmm4
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm4
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm5, %zmm5
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm14, %zmm0
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm5, %zmm9, %zmm0
-; AVX512F-SLOW-NEXT:    vpbroadcastq 48(%r8), %ymm5
-; AVX512F-SLOW-NEXT:    vpbroadcastq 56(%r8), %ymm7
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm5
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm6, %zmm11, %zmm1
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, 64(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm5, 256(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, (%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm12, 192(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm3, 128(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpandn %ymm3, %ymm4, %ymm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm4
+; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm9, %ymm4
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm3, 64(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm6, 256(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, (%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm10, 192(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, 128(%r9)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
 ; AVX512F-FAST-LABEL: store_i16_stride5_vf32:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm9
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm11
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm9, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm18
-; AVX512F-FAST-NEXT:    vmovdqa64 32(%rdi), %ymm17
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm17[1,1,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13>
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm5, %xmm1
-; AVX512F-FAST-NEXT:    vpbroadcastq 40(%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm3
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm11
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
 ; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm12
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm12[3,0,3,0,7,4,7,4]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm7
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm7, %xmm14
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm8
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm15 = xmm8[1,2,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm14 = xmm14[0],xmm15[1],xmm14[2],xmm15[3],xmm14[4,5],xmm15[6],xmm14[7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,0,0]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm14, %zmm14
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm14
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm15
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm15, %ymm10
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm4, %ymm20
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,1,1,1]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpandnq %ymm15, %ymm16, %ymm15
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm15, %zmm16
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm16
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm6, %xmm10
-; AVX512F-FAST-NEXT:    vpbroadcastq 8(%rdi), %xmm13
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm13[1],xmm10[2,3],xmm13[4],xmm10[5],xmm13[6],xmm10[7]
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm14
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm13
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm14[0],xmm6[0],xmm14[1],xmm6[1],xmm14[2],xmm6[2],xmm14[3],xmm6[3]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
-; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm6, %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm21
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,0,1],zmm10[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[1,2,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,0]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm10, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm1, %zmm0
-; AVX512F-FAST-NEXT:    vpbroadcastq (%r8), %ymm2
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm3[0,1,1,1]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm3, %ymm14
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm2, %zmm6
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm6
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm0 = ymm17[0,1,2,1,4,5,6,5]
-; AVX512F-FAST-NEXT:    vprolq $16, %ymm9, %ymm2
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3],ymm0[4],ymm2[5,6],ymm0[7],ymm2[8,9],ymm0[10],ymm2[11],ymm0[12],ymm2[13,14],ymm0[15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[2,3,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29>
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm9, %ymm9
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm10 = ymm17[2,3,2,3,6,7,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0],ymm10[1],ymm9[2],ymm10[3],ymm9[4,5],ymm10[6],ymm9[7,8],ymm10[9],ymm9[10],ymm10[11],ymm9[12,13],ymm10[14],ymm9[15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,2]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm2, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25>
-; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm11, %ymm10
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm12[1,1,1,2,5,5,5,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm10[0],ymm4[1],ymm10[2,3],ymm4[4],ymm10[5],ymm4[6],ymm10[7,8],ymm4[9],ymm10[10,11],ymm4[12],ymm10[13],ymm4[14],ymm10[15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31>
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm11, %ymm10
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm12[3,2,3,3,7,6,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0],ymm11[1],ymm10[2],ymm11[3,4],ymm10[5,6,7,8],ymm11[9],ymm10[10],ymm11[11,12],ymm10[13,14,15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,3,2]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm4, %zmm4
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm1, %zmm4
-; AVX512F-FAST-NEXT:    vpbroadcastq 48(%r8), %ymm1
-; AVX512F-FAST-NEXT:    vpbroadcastq 56(%r8), %ymm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm10
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm10
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm11
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm11, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm1[3,2,3,3,7,6,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3,4],ymm0[5,6,7,8],ymm2[9],ymm0[10],ymm2[11,12],ymm0[13,14,15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,3,2]
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3]
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm2, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm20
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm12
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm12[1,1,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm6
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13>
+; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm6, %xmm2
+; AVX512F-FAST-NEXT:    vpbroadcastq 40(%rdi), %xmm3
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm2, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm4
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm4[2,3,2,3,6,7,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm7[1],ymm3[2],ymm7[3],ymm3[4,5],ymm7[6],ymm3[7,8],ymm7[9],ymm3[10],ymm7[11],ymm3[12,13],ymm7[14],ymm3[15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,2]
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm21, %xmm7
-; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm5, %xmm5
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm16
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm13
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm21
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm14
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm14[3,0,3,0,7,4,7,4]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm8
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm8, %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm9
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm9[1,2,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4,5],xmm4[6],xmm3[7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,0]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm3
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm5, %ymm22
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,1,1]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-FAST-NEXT:    vpandn %ymm1, %ymm10, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm18
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm4
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,2,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm0, %xmm1
+; AVX512F-FAST-NEXT:    vpbroadcastq 8(%rdi), %xmm2
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm17
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm0 = ymm12[0,1,2,1,4,5,6,5]
+; AVX512F-FAST-NEXT:    vprolq $16, %ymm11, %ymm1
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm1[0,1],ymm0[2],ymm1[3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8,9],ymm0[10],ymm1[11],ymm0[12],ymm1[13,14],ymm0[15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29>
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm12[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7,8],ymm2[9],ymm0[10],ymm2[11],ymm0[12,13],ymm2[14],ymm0[15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25>
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm23
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm14[1,1,1,2,5,5,5,6]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5],ymm2[6],ymm0[7,8],ymm2[9],ymm0[10,11],ymm2[12],ymm0[13],ymm2[14],ymm0[15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31>
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm13
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[3,2,3,3,7,6,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm13[0],ymm14[1],ymm13[2],ymm14[3,4],ymm13[5,6,7,8],ymm14[9],ymm13[10],ymm14[11,12],ymm13[13,14,15]
+; AVX512F-FAST-NEXT:    vmovdqa64 (%rdx), %ymm19
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm13
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm2
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm19[3,2,3,3,7,6,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[2],ymm5[3,4],ymm2[5,6,7,8],ymm5[9],ymm2[10],ymm5[11,12],ymm2[13,14,15]
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm15, %xmm9
+; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm5, %xmm5
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm8
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm8, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm15
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm15[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm11[1],ymm1[2],ymm11[3],ymm1[4,5],ymm11[6],ymm1[7,8],ymm11[9],ymm1[10],ymm11[11],ymm1[12,13],ymm11[14],ymm1[15]
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm11
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm11[0],xmm6[0],xmm11[1],xmm6[1],xmm11[2],xmm6[2],xmm11[3],xmm6[3]
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm6, %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm20, %ymm6
+; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm8, %ymm6
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm15[1,1,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm11[0],ymm6[1],ymm11[2,3],ymm6[4],ymm11[5],ymm6[6],ymm11[7,8],ymm6[9],ymm11[10,11],ymm6[12],ymm11[13],ymm6[14],ymm11[15]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,1,0,0]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm12[2,3,2,2]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm14[2,2,3,2]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,3,2]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm5, %zmm3
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,2]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm15[0,1,2,1,4,5,6,5]
+; AVX512F-FAST-NEXT:    vprolq $16, %ymm8, %ymm8
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0,1],ymm14[2],ymm8[3],ymm14[4],ymm8[5,6],ymm14[7],ymm8[8,9],ymm14[10],ymm8[11],ymm14[12],ymm8[13,14],ymm14[15]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm6
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm21, %ymm8
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm13, %ymm8
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm19[3,0,3,0,7,4,7,4]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm14[0],ymm8[1],ymm14[2],ymm8[3],ymm14[4,5],ymm8[6],ymm14[7,8],ymm8[9],ymm14[10],ymm8[11],ymm14[12,13],ymm8[14],ymm14[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm23, %ymm14
+; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm13, %ymm13
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm19[1,1,1,2,5,5,5,6]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm14[1],ymm13[2,3],ymm14[4],ymm13[5],ymm14[6],ymm13[7,8],ymm14[9],ymm13[10,11],ymm14[12],ymm13[13],ymm14[14],ymm13[15]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm8, %zmm8
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm13
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm13[0,1,1,1]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm22, %ymm15
+; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm13, %ymm13
+; AVX512F-FAST-NEXT:    vpbroadcastq 16(%r8), %ymm15
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpandnq %ymm15, %ymm19, %ymm15
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm13, %zmm13
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm3
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm18
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm3
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm9 = zmm17[0,1,0,1,4,5,4,5]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm10, %zmm9
+; AVX512F-FAST-NEXT:    vpbroadcastq (%r8), %ymm3
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm3, %zmm3
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm3
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm7, %zmm7
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm10, %zmm0
+; AVX512F-FAST-NEXT:    vpbroadcastq 48(%r8), %ymm7
+; AVX512F-FAST-NEXT:    vpbroadcastq 56(%r8), %ymm9
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm7
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm2, %zmm1
 ; AVX512F-FAST-NEXT:    vpbroadcastq 24(%r8), %ymm0
-; AVX512F-FAST-NEXT:    vpbroadcastq 32(%r8), %ymm7
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm18, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm2, %ymm3
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm4[1,1,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm7[0],ymm3[1],ymm7[2,3],ymm3[4],ymm7[5],ymm3[6],ymm7[7,8],ymm3[9],ymm7[10,11],ymm3[12],ymm7[13],ymm3[14],ymm7[15]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[0,1,2,1,4,5,6,5]
-; AVX512F-FAST-NEXT:    vprolq $16, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3],ymm4[4],ymm2[5,6],ymm4[7],ymm2[8,9],ymm4[10],ymm2[11],ymm4[12],ymm2[13,14],ymm4[15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm11, %ymm3
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm1[3,0,3,0,7,4,7,4]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4,5],ymm3[6],ymm4[7,8],ymm3[9],ymm4[10],ymm3[11],ymm4[12,13],ymm3[14],ymm4[15]
-; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm11, %ymm4
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[1,1,1,2,5,5,5,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm4[0],ymm1[1],ymm4[2,3],ymm1[4],ymm4[5],ymm1[6],ymm4[7,8],ymm1[9],ymm4[10,11],ymm1[12],ymm4[13],ymm1[14],ymm4[15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm5, %zmm1
-; AVX512F-FAST-NEXT:    vpbroadcastq 16(%r8), %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpandn %ymm2, %ymm3, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm20, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm14, %ymm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, 64(%r9)
+; AVX512F-FAST-NEXT:    vpbroadcastq 32(%r8), %ymm4
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm2, %zmm8
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm13
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm13, 64(%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, 128(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, 256(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, (%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm16, 192(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, 256(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, (%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm18, 192(%r9)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -5689,679 +5702,670 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-SLOW-LABEL: store_i16_stride5_vf64:
 ; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    subq $712, %rsp # imm = 0x2C8
+; AVX512F-SLOW-NEXT:    subq $648, %rsp # imm = 0x288
 ; AVX512F-SLOW-NEXT:    vmovdqa 96(%rcx), %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm30
-; AVX512F-SLOW-NEXT:    vmovdqa64 96(%rdx), %ymm19
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm19[3,0,3,0,7,4,7,4]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm20
+; AVX512F-SLOW-NEXT:    vmovdqa64 96(%rdx), %ymm26
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm26[3,0,3,0,7,4,7,4]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa 96(%rcx), %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm0, %xmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdx), %xmm3
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm3[1,2,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm5[1],xmm2[2],xmm5[3],xmm2[4,5],xmm5[6],xmm2[7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,2,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4,5],xmm4[6],xmm2[7]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,0]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rsi), %ymm8
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm8, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm20
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm15
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm15[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rsi), %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rsi), %ymm7
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm7, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 96(%rdi), %ymm21
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm21[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rsi), %xmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm1, %xmm5
-; AVX512F-SLOW-NEXT:    vpbroadcastq 104(%rdi), %xmm7
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm7[1],xmm5[2,3],xmm7[4],xmm5[5],xmm7[6],xmm5[7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%r8), %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,ymm2[12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm2[16,17],zero,zero,zero,zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm2[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpandn %ymm7, %ymm6, %ymm7
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm2[3,2,3,3,7,6,7,7]
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm2[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm7[0],ymm5[1],ymm7[2],ymm5[3,4],ymm7[5,6,7,8],ymm5[9],ymm7[10],ymm5[11,12],ymm7[13,14,15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm5[2,2,3,2]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm2, %xmm4
+; AVX512F-SLOW-NEXT:    vpbroadcastq 104(%rdi), %xmm5
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%r8), %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,ymm1[12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[16,17],zero,zero,zero,zero,zero,zero
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm1[0,1,1,1]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-SLOW-NEXT:    vpandn %ymm5, %ymm9, %ymm5
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdx), %ymm27
+; AVX512F-SLOW-NEXT:    vmovdqa64 64(%rdx), %ymm31
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm31[3,2,3,3,7,6,7,7]
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm1[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2],ymm4[3,4],ymm5[5,6,7,8],ymm4[9],ymm5[10],ymm4[11,12],ymm5[13,14,15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm4[2,2,3,2]
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm31
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm31[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rsi), %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, (%rsp) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm2[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm28
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm28[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rsi), %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm1[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,2,6,7,6,6]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7,8],ymm0[9],ymm3[10],ymm0[11],ymm3[12,13],ymm0[14],ymm3[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,2]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm0
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rsi), %xmm3
-; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm3, %xmm1
-; AVX512F-SLOW-NEXT:    vpbroadcastq 72(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm1[0],xmm5[1],xmm1[2,3],xmm5[4],xmm1[5],xmm5[6],xmm1[7]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm9
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm9[0],xmm3[0],xmm9[1],xmm3[1],xmm9[2],xmm3[2],xmm9[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,2,1,3]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,5,6]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm3[0,1,0,1],zmm5[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rcx), %xmm5
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm21
-; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm5, %xmm5
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,2,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm5[0],xmm3[1],xmm5[2],xmm3[3],xmm5[4,5],xmm3[6],xmm5[7]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm0, %xmm3
-; AVX512F-SLOW-NEXT:    vpbroadcastq 8(%rdi), %xmm5
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm5[1],xmm3[2,3],xmm5[4],xmm3[5],xmm5[6],xmm3[7]
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdx), %ymm22
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm22[3,2,3,3,7,6,7,7]
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rcx), %xmm2
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm19
+; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,2,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4,5],xmm0[6],xmm2[7]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rsi), %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm0, %xmm2
+; AVX512F-SLOW-NEXT:    vpbroadcastq 72(%rdi), %xmm4
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3],xmm4[4],xmm2[5],xmm4[6],xmm2[7]
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm3[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm14
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm14[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm9[1],ymm0[2],ymm9[3,4],ymm0[5,6,7,8],ymm9[9],ymm0[10],ymm9[11,12],ymm0[13,14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm11
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm27[3,2,3,3,7,6,7,7]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm6
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm6[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm4[0],ymm0[1],ymm4[2],ymm0[3,4],ymm4[5,6,7,8],ymm0[9],ymm4[10],ymm0[11,12],ymm4[13,14,15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,3,2]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm11[0],xmm5[0],xmm11[1],xmm5[1],xmm11[2],xmm5[2],xmm11[3],xmm5[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm4
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm4[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm3, %xmm0
+; AVX512F-SLOW-NEXT:    vpbroadcastq 8(%rdi), %xmm4
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2,3],xmm4[4],xmm0[5],xmm4[6],xmm0[7]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm4
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm4
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,2,1,3]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,5,6]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm11
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm0[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm2
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm2[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,2,6,7,6,6]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm16
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm16[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0],ymm9[1],ymm3[2],ymm9[3],ymm3[4,5],ymm9[6],ymm3[7,8],ymm9[9],ymm3[10],ymm9[11],ymm3[12,13],ymm9[14],ymm3[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm18
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm2
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm16[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0],ymm8[1],ymm3[2],ymm8[3],ymm3[4,5],ymm8[6],ymm3[7,8],ymm8[9],ymm3[10],ymm8[11],ymm3[12,13],ymm8[14],ymm3[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm17
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm3, %ymm9
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm2[3,0,3,0,7,4,7,4]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm12[0],ymm9[1],ymm12[2],ymm9[3],ymm12[4,5],ymm9[6],ymm12[7,8],ymm9[9],ymm12[10],ymm9[11],ymm12[12,13],ymm9[14],ymm12[15]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,2,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm11[0],xmm5[1],xmm11[2],xmm5[3],xmm11[4,5],xmm5[6],xmm11[7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,0]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm5, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm5
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm5, %ymm9
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm1[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm11[0],ymm9[1],ymm11[2,3],ymm9[4],ymm11[5],ymm9[6],ymm11[7,8],ymm9[9],ymm11[10,11],ymm9[12],ymm11[13],ymm9[14],ymm11[15]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm3, %ymm8
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm1[3,0,3,0,7,4,7,4]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm12[0],ymm8[1],ymm12[2],ymm8[3],ymm12[4,5],ymm8[6],ymm12[7,8],ymm8[9],ymm12[10],ymm8[11],ymm12[12,13],ymm8[14],ymm12[15]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm4, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[1,2,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm11[1],xmm4[2],xmm11[3],xmm4[4,5],xmm11[6],xmm4[7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,0]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm4, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm4
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm4, %ymm8
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm0[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm11[0],ymm8[1],ymm11[2,3],ymm8[4],ymm11[5],ymm8[6],ymm11[7,8],ymm8[9],ymm11[10,11],ymm8[12],ymm11[13],ymm8[14],ymm11[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm11
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm12
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm17
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm12, %xmm18
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm11, %xmm10
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 40(%rdi), %xmm11
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm11[1],xmm10[2,3],xmm11[4],xmm10[5],xmm11[6],xmm10[7]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm10, %zmm26
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm9
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm9[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vpandn %ymm10, %ymm6, %ymm6
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm10, %zmm29
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm8
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm8[0,1,1,1]
+; AVX512F-SLOW-NEXT:    vpandn %ymm10, %ymm9, %ymm9
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm9, %ymm9
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm6, %zmm10
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm6
-; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm21
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm9
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm9[0],xmm6[0],xmm9[1],xmm6[1],xmm9[2],xmm6[2],xmm9[3],xmm6[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm11, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm29
-; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm9, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,2,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0],xmm6[1],xmm7[2],xmm6[3],xmm7[4,5],xmm6[6],xmm7[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm28
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm15[0,1,2,1,4,5,6,5]
-; AVX512F-SLOW-NEXT:    vprolq $16, %ymm8, %ymm7
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm7[0,1],ymm6[2],ymm7[3],ymm6[4],ymm7[5,6],ymm6[7],ymm7[8,9],ymm6[10],ymm7[11],ymm6[12],ymm7[13,14],ymm6[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm27
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm8[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[2,3,2,2,6,7,6,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm15[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm6[0],ymm7[1],ymm6[2],ymm7[3],ymm6[4,5],ymm7[6],ymm6[7,8],ymm7[9],ymm6[10],ymm7[11],ymm6[12,13],ymm7[14],ymm6[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm15, %ymm23
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25>
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm0, %ymm7
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm19[1,1,1,2,5,5,5,6]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm8, %ymm8
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm10
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm22
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm9
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm11, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm25
+; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm8, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm9[1,2,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm8[1],xmm5[2],xmm8[3],xmm5[4,5],xmm8[6],xmm5[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm24
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm21[0,1,2,1,4,5,6,5]
+; AVX512F-SLOW-NEXT:    vprolq $16, %ymm7, %ymm8
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm8[0,1],ymm5[2],ymm8[3],ymm5[4],ymm8[5,6],ymm5[7],ymm8[8,9],ymm5[10],ymm8[11],ymm5[12],ymm8[13,14],ymm5[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm23
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm7[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,2,6,7,6,6]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm21[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm7[1],ymm5[2],ymm7[3],ymm5[4,5],ymm7[6],ymm5[7,8],ymm7[9],ymm5[10],ymm7[11],ymm5[12,13],ymm7[14],ymm5[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm19
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25>
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm9, %ymm7
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm26[1,1,1,2,5,5,5,6]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm7[0],ymm8[1],ymm7[2,3],ymm8[4],ymm7[5],ymm8[6],ymm7[7,8],ymm8[9],ymm7[10,11],ymm8[12],ymm7[13],ymm8[14],ymm7[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm15, %ymm25
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm0[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm9[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm19[3,2,3,3,7,6,7,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm7[0],ymm8[1],ymm7[2],ymm8[3,4],ymm7[5,6,7,8],ymm8[9],ymm7[10],ymm8[11,12],ymm7[13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm1[0,1,2,1,4,5,6,5]
-; AVX512F-SLOW-NEXT:    vprolq $16, %ymm5, %ymm8
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm26[3,2,3,3,7,6,7,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0],ymm8[1],ymm7[2],ymm8[3,4],ymm7[5,6,7,8],ymm8[9],ymm7[10],ymm8[11,12],ymm7[13,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm7, %ymm26
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm0[0,1,2,1,4,5,6,5]
+; AVX512F-SLOW-NEXT:    vprolq $16, %ymm4, %ymm8
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0,1],ymm7[2],ymm8[3],ymm7[4],ymm8[5,6],ymm7[7],ymm8[8,9],ymm7[10],ymm8[11],ymm7[12],ymm8[13,14],ymm7[15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,2,6,7,6,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm1[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm5[0],ymm0[1],ymm5[2],ymm0[3],ymm5[4,5],ymm0[6],ymm5[7,8],ymm0[9],ymm5[10],ymm0[11],ymm5[12,13],ymm0[14],ymm5[15]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm3, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm2[1,1,1,2,5,5,5,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm0[0],ymm5[1],ymm0[2,3],ymm5[4],ymm0[5],ymm5[6],ymm0[7,8],ymm5[9],ymm0[10,11],ymm5[12],ymm0[13],ymm5[14],ymm0[15]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,3,2,2,6,7,6,6]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm4[0],ymm0[1],ymm4[2],ymm0[3],ymm4[4,5],ymm0[6],ymm4[7,8],ymm0[9],ymm4[10],ymm0[11],ymm4[12,13],ymm0[14],ymm4[15]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm3, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm1[1,1,1,2,5,5,5,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm0[0],ymm4[1],ymm0[2,3],ymm4[4],ymm0[5],ymm4[6],ymm0[7,8],ymm4[9],ymm0[10,11],ymm4[12],ymm0[13],ymm4[14],ymm0[15]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm3[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm2[3,2,3,3,7,6,7,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3,4],ymm0[5,6,7,8],ymm1[9],ymm0[10],ymm1[11,12],ymm0[13,14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm24
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm11
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm4, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[3,2,3,3,7,6,7,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm0[0],ymm1[1],ymm0[2],ymm1[3,4],ymm0[5,6,7,8],ymm1[9],ymm0[10],ymm1[11,12],ymm0[13,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm14, %ymm13
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm2, %ymm0
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm16[1,1,2,2]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
-; AVX512F-SLOW-NEXT:    vprolq $16, %ymm4, %ymm1
+; AVX512F-SLOW-NEXT:    vprolq $16, %ymm2, %ymm1
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm16[0,1,2,1,4,5,6,5]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8,9],ymm2[10],ymm1[11],ymm2[12],ymm1[13,14],ymm2[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm16
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm9
-; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm14, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm22[3,0,3,0,7,4,7,4]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm9
+; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm6, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm27[3,0,3,0,7,4,7,4]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm14, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm22[1,1,1,2,5,5,5,6]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm6, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm27[1,1,1,2,5,5,5,6]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5],ymm2[6],ymm1[7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13],ymm2[14],ymm1[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm2
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 16(%r8), %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpandnq %ymm0, %ymm22, %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpandn %ymm0, %ymm1, %ymm6
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm12, %ymm3
-; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm0, %ymm12
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm12, %zmm4
-; AVX512F-SLOW-NEXT:    vmovdqu (%rsp), %ymm2 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm2, %ymm12
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm31[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm14[0],ymm12[1],ymm14[2,3],ymm12[4],ymm14[5],ymm12[6],ymm14[7,8],ymm12[9],ymm14[10,11],ymm12[12],ymm14[13],ymm12[14],ymm14[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm18[2,3,2,2]
-; AVX512F-SLOW-NEXT:    vprolq $16, %ymm2, %ymm14
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm31[0,1,2,1,4,5,6,5]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm0, %ymm14
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm27
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm14, %zmm6
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm11, %ymm14
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm28[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0],ymm14[1],ymm12[2,3],ymm14[4],ymm12[5],ymm14[6],ymm12[7,8],ymm14[9],ymm12[10,11],ymm14[12],ymm12[13],ymm14[14],ymm12[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm17[2,3,2,2]
+; AVX512F-SLOW-NEXT:    vprolq $16, %ymm11, %ymm14
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm28[0,1,2,1,4,5,6,5]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm14[0,1],ymm11[2],ymm14[3],ymm11[4],ymm14[5,6],ymm11[7],ymm14[8,9],ymm11[10],ymm14[11],ymm11[12],ymm14[13,14],ymm11[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm12, %zmm20
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm12, %zmm21
+; AVX512F-SLOW-NEXT:    vmovdqu (%rsp), %ymm11 # 32-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm11, %ymm12
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm2[3,0,3,0,7,4,7,4]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm31[3,0,3,0,7,4,7,4]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm14[0],ymm12[1],ymm14[2],ymm12[3],ymm14[4,5],ymm12[6],ymm14[7,8],ymm12[9],ymm14[10],ymm12[11],ymm14[12,13],ymm12[14],ymm14[15]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm11, %ymm6
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm17[0,2,1,3]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm11, %ymm5
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm18[0,2,1,3]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,4,5,6]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm2[1,1,1,2,5,5,5,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0],ymm9[1],ymm6[2,3],ymm9[4],ymm6[5],ymm9[6],ymm6[7,8],ymm9[9],ymm6[10,11],ymm9[12],ymm6[13],ymm9[14],ymm6[15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm31[1,1,1,2,5,5,5,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm9[1],ymm5[2,3],ymm9[4],ymm5[5],ymm9[6],ymm5[7,8],ymm9[9],ymm5[10,11],ymm9[12],ymm5[13],ymm9[14],ymm5[15]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm17 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm17 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm9, %zmm18, %zmm17
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 88(%r8), %ymm9
-; AVX512F-SLOW-NEXT:    vpbroadcastq 96(%r8), %ymm19
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm9, %zmm9
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm17, %zmm19, %zmm9
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm17 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpbroadcastq 96(%r8), %ymm20
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm20, %zmm9, %zmm9
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm17, %zmm20, %zmm9
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm11, %zmm17
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm13, %zmm13
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm17, %zmm18, %zmm13
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 24(%r8), %ymm14
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 32(%r8), %ymm17
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm14, %zmm14
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm13, %zmm19, %zmm14
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm21[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm13, %zmm20, %zmm14
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm22[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vpermq $4, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm17 = mem[0,1,0,0]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm12, %zmm6
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm12, %zmm5
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 80(%r8), %ymm12
-; AVX512F-SLOW-NEXT:    vpandnq %ymm12, %ymm22, %ymm2
+; AVX512F-SLOW-NEXT:    vpandn %ymm12, %ymm1, %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqa 64(%r8), %ymm12
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm12, %ymm11
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm11
+; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm12, %ymm11
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm19 = ymm29[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm21 = ymm28[0,1,0,0]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm25[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm24[0,1,0,0]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm27[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[2,3,2,2]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm25[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,3,2]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm19[2,3,2,2]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm15[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm26[2,2,3,2]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,2]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm24[2,2,3,2]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm11, %zmm2
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,3,2]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm11, %zmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0]
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm25 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm26 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm19 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm29 # 64-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm11, %zmm25, %zmm24
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm11, %zmm26, %zmm10
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm11, %zmm19, %zmm26
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm11, %zmm29, %zmm10
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm13, %zmm11
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm11 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpbroadcastq 64(%r8), %ymm17
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm17, %zmm12
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm11, %zmm17, %zmm12
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm21, %zmm19, %zmm11
-; AVX512F-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm11 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpbroadcastq (%r8), %ymm19
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm19, %zmm0
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm11, %zmm17, %zmm0
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm22, %zmm11
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm27, %zmm15
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm11, %zmm13, %zmm15
-; AVX512F-SLOW-NEXT:    vpbroadcastq 112(%r8), %ymm11
-; AVX512F-SLOW-NEXT:    vpbroadcastq 120(%r8), %ymm17
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm11, %zmm11
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm15, %zmm17, %zmm11
+; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm13 = mem[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm11, %zmm17, %zmm13
+; AVX512F-SLOW-NEXT:    vpbroadcastq 64(%r8), %ymm11
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm11
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm13, %zmm12, %zmm11
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm20, %zmm13
+; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm19 = mem[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm13, %zmm17, %zmm19
+; AVX512F-SLOW-NEXT:    vpbroadcastq (%r8), %ymm13
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm13, %zmm0
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm19, %zmm12, %zmm0
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm23, %zmm12
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm25, %zmm13
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm12, %zmm17, %zmm13
+; AVX512F-SLOW-NEXT:    vpbroadcastq 112(%r8), %ymm12
+; AVX512F-SLOW-NEXT:    vpbroadcastq 120(%r8), %ymm15
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm12, %zmm12
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm13, %zmm15, %zmm12
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm7
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm5, %zmm3
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm7, %zmm13, %zmm3
-; AVX512F-SLOW-NEXT:    vpbroadcastq 48(%r8), %ymm5
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm7, %zmm17, %zmm3
+; AVX512F-SLOW-NEXT:    vpbroadcastq 48(%r8), %ymm4
 ; AVX512F-SLOW-NEXT:    vpbroadcastq 56(%r8), %ymm7
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm5
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm16, %zmm18, %zmm1
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm20, %zmm18, %zmm6
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm4
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm15, %zmm4
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm16, %zmm18, %zmm2
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm21, %zmm18, %zmm5
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm3, %zmm1, %zmm4
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm3, %zmm6, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, 384(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, 64(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm5, 256(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm11, 576(%r9)
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm3, %zmm2, %zmm6
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm3, %zmm5, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm1, 384(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm6, 64(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, 256(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm12, 576(%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, (%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm10, 192(%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm14, 128(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm12, 320(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm11, 320(%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm9, 448(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm24, 512(%r9)
-; AVX512F-SLOW-NEXT:    addq $712, %rsp # imm = 0x2C8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm26, 512(%r9)
+; AVX512F-SLOW-NEXT:    addq $648, %rsp # imm = 0x288
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
 ; AVX512F-FAST-LABEL: store_i16_stride5_vf64:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    subq $536, %rsp # imm = 0x218
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rcx), %ymm11
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa %ymm1, %ymm8
+; AVX512F-FAST-NEXT:    subq $488, %rsp # imm = 0x1E8
+; AVX512F-FAST-NEXT:    vmovdqa 96(%rcx), %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa %ymm2, %ymm7
 ; AVX512F-FAST-NEXT:    vmovdqa64 96(%rdx), %ymm20
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm20[3,0,3,0,7,4,7,4]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rcx), %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm2, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm26
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm20[3,0,3,0,7,4,7,4]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
+; AVX512F-FAST-NEXT:    vmovdqa 96(%rcx), %xmm9
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
+; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm9, %xmm2
 ; AVX512F-FAST-NEXT:    vmovdqa 96(%rdx), %xmm15
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm15[1,2,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,0]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rsi), %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm3 = xmm15[1,2,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3],xmm2[4,5],xmm3[6],xmm2[7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,0]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 96(%rsi), %ymm8
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa %ymm2, %ymm10
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %ymm5
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm5[1,1,2,2]
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm8, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa %ymm2, %ymm11
+; AVX512F-FAST-NEXT:    vmovdqa64 96(%rdi), %ymm23
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm23[1,1,2,2]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
 ; AVX512F-FAST-NEXT:    vmovdqa 96(%rsi), %xmm4
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm27
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm31
 ; AVX512F-FAST-NEXT:    vpbroadcastq 104(%rdi), %xmm4
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2,3],xmm4[4],xmm2[5],xmm4[6],xmm2[7]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm22
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm16
 ; AVX512F-FAST-NEXT:    vmovdqa 96(%r8), %ymm1
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm4, %ymm16
+; AVX512F-FAST-NEXT:    vmovdqa %ymm4, %ymm14
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,1,1]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpandn %ymm1, %ymm4, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-FAST-NEXT:    vpandn %ymm1, %ymm13, %ymm1
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rdx), %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rcx), %xmm2
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm2, %xmm2
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,2,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,0]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm1
 ; AVX512F-FAST-NEXT:    vmovdqa 64(%rsi), %xmm2
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm6
-; AVX512F-FAST-NEXT:    vpbroadcastq 72(%rdi), %xmm7
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm7
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
-; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm2, %xmm2
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,0,1],zmm6[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm4
+; AVX512F-FAST-NEXT:    vpbroadcastq 72(%rdi), %xmm5
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm5
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm2, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa %xmm5, %xmm10
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdx), %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rcx), %xmm6
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
-; AVX512F-FAST-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm6, %xmm6
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,2,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm6[0],xmm2[1],xmm6[2],xmm2[3],xmm6[4,5],xmm2[6],xmm6[7]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm2
-; AVX512F-FAST-NEXT:    vpbroadcastq 8(%rdi), %xmm6
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm2[0],xmm6[1],xmm2[2,3],xmm6[4],xmm2[5],xmm6[6],xmm2[7]
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,0,1],zmm6[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa64 32(%rdx), %ymm27
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm27[3,0,3,0,7,4,7,4]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm5[0],ymm4[1],ymm5[2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7,8],ymm4[9],ymm5[10],ymm4[11],ymm5[12,13],ymm4[14],ymm5[15]
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm4
+; AVX512F-FAST-NEXT:    vpbroadcastq 8(%rdi), %xmm5
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm5
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
+; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm2, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 32(%rdx), %ymm25
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm25[3,0,3,0,7,4,7,4]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm6[0],ymm1[1],ymm6[2],ymm1[3],ymm6[4,5],ymm1[6],ymm6[7,8],ymm1[9],ymm6[10],ymm1[11],ymm6[12,13],ymm1[14],ymm6[15]
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm8
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm12
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm12, %xmm6
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm8[1,2,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2],xmm7[3],xmm6[4,5],xmm7[6],xmm6[7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,0]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm6, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm10
+; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm10, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm5
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm5[1,2,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4,5],xmm4[6],xmm1[7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,0]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm9
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm7
-; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm7, %ymm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm9[1,1,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm6[0],ymm1[1],ymm6[2,3],ymm1[4],ymm6[5],ymm1[6],ymm6[7,8],ymm1[9],ymm6[10,11],ymm1[12],ymm6[13],ymm1[14],ymm6[15]
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm13
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm13, %xmm3
-; AVX512F-FAST-NEXT:    vpbroadcastq 40(%rdi), %xmm10
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm10[1],xmm3[2,3],xmm10[4],xmm3[5],xmm10[6],xmm3[7]
+; AVX512F-FAST-NEXT:    vmovdqa64 32(%rdi), %ymm30
+; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm7, %ymm1
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm30[1,1,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm4[0],ymm1[1],ymm4[2,3],ymm1[4],ymm4[5],ymm1[6],ymm4[7,8],ymm1[9],ymm4[10,11],ymm1[12],ymm4[13],ymm1[14],ymm4[15]
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm11
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm11, %xmm3
+; AVX512F-FAST-NEXT:    vpbroadcastq 40(%rdi), %xmm4
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5],xmm4[6],xmm3[7]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm24
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm18
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm1
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[0,1,1,1]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm16, %ymm6
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vpandn %ymm3, %ymm4, %ymm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm1, %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm4
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm18
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm4[1,2,2,2]
+; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vpandn %ymm3, %ymm13, %ymm3
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm29
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm3
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm4
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,2,2,2]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2],xmm1[3],xmm3[4,5],xmm1[6],xmm3[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm5[0,1,2,1,4,5,6,5]
-; AVX512F-FAST-NEXT:    vprolq $16, %ymm0, %ymm3
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm17
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm23[0,1,2,1,4,5,6,5]
+; AVX512F-FAST-NEXT:    vprolq $16, %ymm8, %ymm3
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1],ymm1[2],ymm3[3],ymm1[4],ymm3[5,6],ymm1[7],ymm3[8,9],ymm1[10],ymm3[11],ymm1[12],ymm3[13,14],ymm1[15]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29>
-; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm5[2,3,2,3,6,7,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8],ymm1[9],ymm0[10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25>
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm16
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm20[1,1,1,2,5,5,5,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5],ymm1[6],ymm0[7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13],ymm1[14],ymm0[15]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31>
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm0
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm20[3,2,3,3,7,6,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3,4],ymm0[5,6,7,8],ymm3[9],ymm0[10],ymm3[11,12],ymm0[13,14,15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29>
+; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm8, %ymm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm23[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4,5],ymm3[6],ymm1[7,8],ymm3[9],ymm1[10],ymm3[11],ymm1[12,13],ymm3[14],ymm1[15]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,u,u,22,23,22,23,u,u,20,21,u,u,24,25>
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm0, %ymm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm20[1,1,1,2,5,5,5,6]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5],ymm3[6],ymm1[7,8],ymm3[9],ymm1[10,11],ymm3[12],ymm1[13],ymm3[14],ymm1[15]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31>
+; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm20[3,2,3,3,7,6,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3,4],ymm0[5,6,7,8],ymm1[9],ymm0[10],ymm1[11,12],ymm0[13,14,15]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa64 64(%rdx), %ymm20
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rcx), %ymm5
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm0
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm20[3,2,3,3,7,6,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2],ymm4[3,4],ymm0[5,6,7,8],ymm4[9],ymm0[10],ymm4[11,12],ymm0[13,14,15]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm26, %xmm0
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
-; AVX512F-FAST-NEXT:    vmovdqa64 64(%rdi), %ymm26
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rsi), %ymm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm3, %ymm4
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm26[2,3,2,3,6,7,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm4[0],ymm11[1],ymm4[2],ymm11[3],ymm4[4,5],ymm11[6],ymm4[7,8],ymm11[9],ymm4[10],ymm11[11],ymm4[12,13],ymm11[14],ymm4[15]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %xmm4
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm27, %xmm0
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm9[0,1,2,1,4,5,6,5]
-; AVX512F-FAST-NEXT:    vprolq $16, %ymm7, %ymm14
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm14[0,1],ymm11[2],ymm14[3],ymm11[4],ymm14[5,6],ymm11[7],ymm14[8,9],ymm11[10],ymm14[11],ymm11[12],ymm14[13,14],ymm11[15]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm7, %ymm7
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[2,3,2,3,6,7,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm7[0],ymm9[1],ymm7[2],ymm9[3],ymm7[4,5],ymm9[6],ymm7[7,8],ymm9[9],ymm7[10],ymm9[11],ymm7[12,13],ymm9[14],ymm7[15]
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rcx), %ymm12
+; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm12, %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm20[3,2,3,3,7,6,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3,4],ymm0[5,6,7,8],ymm1[9],ymm0[10],ymm1[11,12],ymm0[13,14,15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm26
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3]
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %ymm15
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rsi), %ymm9
+; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm9, %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm15[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8],ymm1[9],ymm0[10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm28
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm16, %ymm14
-; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm2, %ymm7
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm25[1,1,1,2,5,5,5,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm7[0],ymm9[1],ymm7[2,3],ymm9[4],ymm7[5],ymm9[6],ymm7[7,8],ymm9[9],ymm7[10,11],ymm9[12],ymm7[13],ymm9[14],ymm7[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm29
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm25[3,2,3,3,7,6,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm7[1],ymm2[2],ymm7[3,4],ymm2[5,6,7,8],ymm7[9],ymm2[10],ymm7[11,12],ymm2[13,14,15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm30
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm15
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm15, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 (%rdx), %ymm25
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm25[3,2,3,3,7,6,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm2[1],ymm1[2],ymm2[3,4],ymm1[5,6,7,8],ymm2[9],ymm1[10],ymm2[11,12],ymm1[13,14,15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm31
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm12[0],xmm8[0],xmm12[1],xmm8[1],xmm12[2],xmm8[2],xmm12[3],xmm8[3]
+; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm31, %xmm1
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm0 = ymm30[0,1,2,1,4,5,6,5]
+; AVX512F-FAST-NEXT:    vprolq $16, %ymm7, %ymm8
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0,1],ymm0[2],ymm8[3],ymm0[4],ymm8[5,6],ymm0[7],ymm8[8,9],ymm0[10],ymm8[11],ymm0[12],ymm8[13,14],ymm0[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm25
+; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm7, %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm30[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm7[1],ymm0[2],ymm7[3],ymm0[4,5],ymm7[6],ymm0[7,8],ymm7[9],ymm0[10],ymm7[11],ymm0[12,13],ymm7[14],ymm0[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm24
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm2, %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm27[1,1,1,2,5,5,5,6]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm7[1],ymm0[2,3],ymm7[4],ymm0[5],ymm7[6],ymm0[7,8],ymm7[9],ymm0[10,11],ymm7[12],ymm0[13],ymm7[14],ymm0[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm22
+; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm2, %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm27[3,2,3,3,7,6,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm0[0],ymm2[1],ymm0[2],ymm2[3,4],ymm0[5,6,7,8],ymm2[9],ymm0[10],ymm2[11,12],ymm0[13,14,15]
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa64 (%rdx), %ymm27
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm27[3,2,3,3,7,6,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm6[1],ymm2[2],ymm6[3,4],ymm2[5,6,7,8],ymm6[9],ymm2[10],ymm6[11,12],ymm2[13,14,15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm21
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm10[0],xmm5[0],xmm10[1],xmm5[1],xmm10[2],xmm5[2],xmm10[3],xmm5[3]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm12
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm18, %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm6, %xmm8
-; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa64 (%rdi), %ymm27
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm27[2,3,2,3,6,7,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm6[1],ymm2[2],ymm6[3],ymm2[4,5],ymm6[6],ymm2[7,8],ymm6[9],ymm2[10],ymm6[11],ymm2[12,13],ymm6[14],ymm2[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm17
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm6
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3]
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm7
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm19
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm5
+; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm5, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa64 (%rdi), %ymm23
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm10 = ymm23[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm14 = ymm4[0],ymm10[1],ymm4[2],ymm10[3],ymm4[4,5],ymm10[6],ymm4[7,8],ymm10[9],ymm4[10],ymm10[11],ymm4[12,13],ymm10[14],ymm4[15]
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm4
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
-; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm6
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm6
 ; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm10, %xmm10
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm0, %ymm4
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm27[1,1,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm13[0],ymm4[1],ymm13[2,3],ymm4[4],ymm13[5],ymm4[6],ymm13[7,8],ymm4[9],ymm13[10,11],ymm4[12],ymm13[13],ymm4[14],ymm13[15]
-; AVX512F-FAST-NEXT:    vprolq $16, %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm27[0,1,2,1,4,5,6,5]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm13[2],ymm0[3],ymm13[4],ymm0[5,6],ymm13[7],ymm0[8,9],ymm13[10],ymm0[11],ymm13[12],ymm0[13,14],ymm13[15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm1
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm23[1,1,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm11[0],ymm1[1],ymm11[2,3],ymm1[4],ymm11[5],ymm1[6],ymm11[7,8],ymm1[9],ymm11[10,11],ymm1[12],ymm11[13],ymm1[14],ymm11[15]
+; AVX512F-FAST-NEXT:    vprolq $16, %ymm5, %ymm5
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm23[0,1,2,1,4,5,6,5]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm11[2],ymm5[3],ymm11[4],ymm5[5,6],ymm11[7],ymm5[8,9],ymm11[10],ymm5[11],ymm11[12],ymm5[13,14],ymm11[15]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm23
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm27[3,0,3,0,7,4,7,4]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm5[0],ymm1[1],ymm5[2],ymm1[3],ymm5[4,5],ymm1[6],ymm5[7,8],ymm1[9],ymm5[10],ymm1[11],ymm5[12,13],ymm1[14],ymm5[15]
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm27[1,1,1,2,5,5,5,6]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3],ymm5[4],ymm0[5],ymm5[6],ymm0[7,8],ymm5[9],ymm0[10,11],ymm5[12],ymm0[13],ymm5[14],ymm0[15]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm21
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm15, %ymm0
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm25[3,0,3,0,7,4,7,4]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm13[0],ymm0[1],ymm13[2],ymm0[3],ymm13[4,5],ymm0[6],ymm13[7,8],ymm0[9],ymm13[10],ymm0[11],ymm13[12,13],ymm0[14],ymm13[15]
-; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm15, %ymm13
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm16, %ymm7
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm25[1,1,1,2,5,5,5,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm15[1],ymm13[2,3],ymm15[4],ymm13[5],ymm15[6],ymm13[7,8],ymm15[9],ymm13[10,11],ymm15[12],ymm13[13],ymm15[14],ymm13[15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm25
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm27
 ; AVX512F-FAST-NEXT:    vpbroadcastq 16(%r8), %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpandn %ymm0, %ymm13, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm15
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm14
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm14, %zmm4
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm3, %ymm14
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm26[1,1,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0],ymm14[1],ymm11[2,3],ymm14[4],ymm11[5],ymm14[6],ymm11[7,8],ymm14[9],ymm11[10,11],ymm14[12],ymm11[13],ymm14[14],ymm11[15]
-; AVX512F-FAST-NEXT:    vprolq $16, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm26[0,1,2,1,4,5,6,5]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm14[2],ymm3[3],ymm14[4],ymm3[5,6],ymm14[7],ymm3[8,9],ymm14[10],ymm3[11],ymm14[12],ymm3[13,14],ymm14[15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermq $4, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm14 = mem[0,1,0,0]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm19[0,1,0,0]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm11, %zmm18
-; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm5, %ymm11
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm20[3,0,3,0,7,4,7,4]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0],ymm11[1],ymm9[2],ymm11[3],ymm9[4,5],ymm11[6],ymm9[7,8],ymm11[9],ymm9[10],ymm11[11],ymm9[12,13],ymm11[14],ymm9[15]
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm5, %ymm5
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm15[0,1,1,1]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm31 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpandnq %ymm0, %ymm31, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm5
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm11
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm11, %zmm30
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm9, %ymm0
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm15[1,1,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm11[0],ymm0[1],ymm11[2,3],ymm0[4],ymm11[5],ymm0[6],ymm11[7,8],ymm0[9],ymm11[10,11],ymm0[12],ymm11[13],ymm0[14],ymm11[15]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm17[0,1,0,0]
+; AVX512F-FAST-NEXT:    vprolq $16, %ymm9, %ymm9
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[0,1,2,1,4,5,6,5]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0,1],ymm15[2],ymm9[3],ymm15[4],ymm9[5,6],ymm15[7],ymm9[8,9],ymm15[10],ymm9[11],ymm15[12],ymm9[13,14],ymm15[15]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm12, %ymm9
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm20[3,0,3,0,7,4,7,4]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm15[0],ymm9[1],ymm15[2],ymm9[3],ymm15[4,5],ymm9[6],ymm15[7,8],ymm9[9],ymm15[10],ymm9[11],ymm15[12,13],ymm9[14],ymm15[15]
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm12, %ymm3
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,1,1]
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm20[1,1,1,2,5,5,5,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm15[1],ymm5[2,3],ymm15[4],ymm5[5],ymm15[6],ymm5[7,8],ymm15[9],ymm5[10,11],ymm15[12],ymm5[13],ymm15[14],ymm5[15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm9, %zmm5
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm15[1],ymm3[2,3],ymm15[4],ymm3[5],ymm15[6],ymm3[7,8],ymm15[9],ymm3[10,11],ymm15[12],ymm3[13],ymm15[14],ymm3[15]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm9, %zmm3
 ; AVX512F-FAST-NEXT:    vmovdqa 64(%r8), %ymm9
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm9[0,1,1,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm9, %ymm9
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm9, %ymm9
 ; AVX512F-FAST-NEXT:    vpbroadcastq 80(%r8), %ymm20
-; AVX512F-FAST-NEXT:    vpandnq %ymm20, %ymm13, %ymm13
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm9, %zmm9
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0]
-; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm22 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm24 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm13, %zmm22, %zmm3
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm13, %zmm24, %zmm0
-; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm13 = mem[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpandnq %ymm20, %ymm31, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm9, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0]
+; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm16 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm18 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm9, %zmm16, %zmm20
+; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm9, %zmm18, %zmm29
+; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm9 = mem[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpermq $174, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # ymm16 = mem[2,3,2,2]
-; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm19 = mem[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpermq $186, {{[-0-9]+}}(%r{{[sb]}}p), %ymm20 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm20 = mem[2,2,3,2]
-; AVX512F-FAST-NEXT:    vpermq $186, {{[-0-9]+}}(%r{{[sb]}}p), %ymm22 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm22 = mem[2,2,3,2]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermq $174, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm24 = mem[2,3,2,2]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm27 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm27 = mem[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm17 = mem[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpermq $186, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm18 = mem[2,2,3,2]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm26[2,2,3,2]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm7[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[2,3,2,2]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm29 = ymm29[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm30[2,2,3,2]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm31[2,2,3,2]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm23[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm17[2,3,2,2]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm6[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm25[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm24[2,3,2,2]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm22[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,3,2]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm21[2,2,3,2]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm19[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,2]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,1,0,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm12, %zmm12
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14, %zmm12 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpbroadcastq 64(%r8), %ymm17
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm17, %zmm15
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm12, %zmm17, %zmm15
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm26, %zmm1, %zmm1
-; AVX512F-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14, %zmm1 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpbroadcastq (%r8), %ymm12
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm12, %zmm11
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm17, %zmm11
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm13, %zmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm19, %zmm12
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm14, %zmm12
-; AVX512F-FAST-NEXT:    vpbroadcastq 112(%r8), %ymm1
-; AVX512F-FAST-NEXT:    vpbroadcastq 120(%r8), %ymm13
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm1, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm12, %zmm13, %zmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm22, %zmm8
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm24, %zmm6
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm8, %zmm12, %zmm6
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm28, %zmm27, %zmm8
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm29, %zmm16
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm8, %zmm14, %zmm16
-; AVX512F-FAST-NEXT:    vpbroadcastq 48(%r8), %ymm8
-; AVX512F-FAST-NEXT:    vpbroadcastq 56(%r8), %ymm14
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm8, %zmm8
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm16, %zmm13, %zmm8
-; AVX512F-FAST-NEXT:    vpbroadcastq 88(%r8), %ymm13
-; AVX512F-FAST-NEXT:    vpbroadcastq 96(%r8), %ymm14
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm13, %zmm13
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm14, %zmm13
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm31, %zmm6
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm2, %zmm2
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm12, %zmm2
-; AVX512F-FAST-NEXT:    vpbroadcastq 24(%r8), %ymm6
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, (%rsp), %zmm19, %zmm19 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm21 = mem[0,1,0,1,4,5,4,5]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm19, %zmm22, %zmm21
+; AVX512F-FAST-NEXT:    vpbroadcastq 64(%r8), %ymm19
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm19, %zmm15
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm21, %zmm19, %zmm15
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm21, %zmm11
+; AVX512F-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm21 = mem[0,1,0,1,4,5,4,5]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm11, %zmm22, %zmm21
+; AVX512F-FAST-NEXT:    vpbroadcastq (%r8), %ymm11
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm11, %zmm5
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm21, %zmm19, %zmm5
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm9, %zmm9
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm17, %zmm11
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm9, %zmm22, %zmm11
+; AVX512F-FAST-NEXT:    vpbroadcastq 112(%r8), %ymm9
+; AVX512F-FAST-NEXT:    vpbroadcastq 120(%r8), %ymm16
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm9, %zmm9
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm11, %zmm16, %zmm9
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm26, %zmm2
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm28, %zmm4
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm11, %zmm4
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm31, %zmm2
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm12, %zmm8
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm22, %zmm8
+; AVX512F-FAST-NEXT:    vpbroadcastq 48(%r8), %ymm2
+; AVX512F-FAST-NEXT:    vpbroadcastq 56(%r8), %ymm12
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm2, %zmm2
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm8, %zmm16, %zmm2
+; AVX512F-FAST-NEXT:    vpbroadcastq 88(%r8), %ymm8
+; AVX512F-FAST-NEXT:    vpbroadcastq 96(%r8), %ymm12
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm8, %zmm8
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm12, %zmm8
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm4
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm14, %zmm6
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm11, %zmm6
+; AVX512F-FAST-NEXT:    vpbroadcastq 24(%r8), %ymm4
 ; AVX512F-FAST-NEXT:    vpbroadcastq 32(%r8), %ymm7
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm14, %zmm6
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm21, %zmm12, %zmm25
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm18, %zmm12, %zmm5
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm2, %zmm25, %zmm4
-; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm2, %zmm5, %zmm9
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, 384(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, 64(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, 128(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, 256(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm13, 448(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 576(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm11, (%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, 192(%r9)
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm4
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm12, %zmm4
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm23, %zmm11, %zmm27
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm11, %zmm3
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm0, %zmm27, %zmm30
+; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm0, %zmm3, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 384(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm30, 64(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, 128(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, 256(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, 448(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, 576(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, (%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm29, 192(%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm15, 320(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, 512(%r9)
-; AVX512F-FAST-NEXT:    addq $536, %rsp # imm = 0x218
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm20, 512(%r9)
+; AVX512F-FAST-NEXT:    addq $488, %rsp # imm = 0x1E8
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
index 0bac0b672cf9c..71505f5912548 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
@@ -184,11 +184,12 @@ define void @store_i16_stride6_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512BW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],mem[0],xmm2[1],mem[1]
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,2,4,6,16,18,1,3,5,7,17,19,u,u,u,u>
-; AVX512BW-NEXT:    vpermi2w %ymm1, %ymm0, %ymm2
-; AVX512BW-NEXT:    vextracti128 $1, %ymm2, %xmm0
-; AVX512BW-NEXT:    vmovq %xmm0, 16(%rax)
-; AVX512BW-NEXT:    vmovdqa %xmm2, (%rax)
+; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,2,4,6,8,10,1,3,5,7,9,11,u,u,u,u>
+; AVX512BW-NEXT:    vpermw %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512BW-NEXT:    vmovq %xmm1, 16(%rax)
+; AVX512BW-NEXT:    vmovdqa %xmm0, (%rax)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %in.vec0 = load <2 x i16>, ptr %in.vecptr0, align 64
@@ -456,10 +457,11 @@ define void @store_i16_stride6_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,4,8,12,32,36,1,5,9,13,33,37,2,6,10,14,34,38,3,7,11,15,35,39,u,u,u,u,u,u,u,u>
-; AVX512BW-NEXT:    vpermi2w %zmm2, %zmm0, %zmm1
-; AVX512BW-NEXT:    vextracti32x4 $2, %zmm1, 32(%rax)
-; AVX512BW-NEXT:    vmovdqa %ymm1, (%rax)
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,4,8,12,16,20,1,5,9,13,17,21,2,6,10,14,18,22,3,7,11,15,19,23,u,u,u,u,u,u,u,u>
+; AVX512BW-NEXT:    vpermw %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vextracti32x4 $2, %zmm0, 32(%rax)
+; AVX512BW-NEXT:    vmovdqa %ymm0, (%rax)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %in.vec0 = load <4 x i16>, ptr %in.vecptr0, align 64
@@ -1561,10 +1563,11 @@ define void @store_i16_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[12],ymm5[12],ymm4[13],ymm5[13],ymm4[14],ymm5[14],ymm4[15],ymm5[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = <5,u,14,6,u,15,7,u>
 ; AVX512F-SLOW-NEXT:    vpermi2d %ymm7, %ymm8, %ymm9
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = [0,21,2,3,22,5,6,23]
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm1[4],ymm3[4],ymm1[5],ymm3[5],ymm1[6],ymm3[6],ymm1[7],ymm3[7],ymm1[12],ymm3[12],ymm1[13],ymm3[13],ymm1[14],ymm3[14],ymm1[15],ymm3[15]
-; AVX512F-SLOW-NEXT:    vpermi2d %zmm8, %zmm9, %zmm7
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm16
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm7
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = [8,21,10,11,22,13,14,23]
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm9 = ymm1[4],ymm3[4],ymm1[5],ymm3[5],ymm1[6],ymm3[6],ymm1[7],ymm3[7],ymm1[12],ymm3[12],ymm1[13],ymm3[13],ymm1[14],ymm3[14],ymm1[15],ymm3[15]
+; AVX512F-SLOW-NEXT:    vpermi2d %zmm9, %zmm7, %zmm8
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm16
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm6
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm7
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
@@ -1646,12 +1649,13 @@ define void @store_i16_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,1,8,u,0,9,u,3>
 ; AVX512F-FAST-NEXT:    vpermi2d %ymm13, %ymm12, %ymm15
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [16,1,2,17,4,5,18,7]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm9[1,2,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm0, %zmm12
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [16,9,10,17,12,13,18,15]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm15 = xmm9[1,2,2,3]
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm11[1,2,2,3]
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3]
-; AVX512F-FAST-NEXT:    vpermi2d %zmm0, %zmm15, %zmm12
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm14, %zmm0
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
+; AVX512F-FAST-NEXT:    vpermi2d %zmm0, %zmm12, %zmm13
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm14, %zmm0
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <1,u,10,2,u,11,3,u>
@@ -1663,11 +1667,12 @@ define void @store_i16_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <4,12,u,5,13,u,6,14>
 ; AVX512F-FAST-NEXT:    vpermi2d %ymm6, %ymm7, %ymm9
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,1,20,3,4,21,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm6
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [8,9,20,11,12,21,14,15]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm16, %ymm10
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm10[0],ymm1[0],ymm10[1],ymm1[1],ymm10[2],ymm1[2],ymm10[3],ymm1[3],ymm10[8],ymm1[8],ymm10[9],ymm1[9],ymm10[10],ymm1[10],ymm10[11],ymm1[11]
-; AVX512F-FAST-NEXT:    vpermi2d %zmm7, %zmm9, %zmm6
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm8, %zmm6
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm9 = ymm10[0],ymm1[0],ymm10[1],ymm1[1],ymm10[2],ymm1[2],ymm10[3],ymm1[3],ymm10[8],ymm1[8],ymm10[9],ymm1[9],ymm10[10],ymm1[10],ymm10[11],ymm1[11]
+; AVX512F-FAST-NEXT:    vpermi2d %zmm9, %zmm6, %zmm7
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm6
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
 ; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm3, %ymm8
 ; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm7
@@ -1686,10 +1691,11 @@ define void @store_i16_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[12],ymm5[12],ymm4[13],ymm5[13],ymm4[14],ymm5[14],ymm4[15],ymm5[15]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <5,u,14,6,u,15,7,u>
 ; AVX512F-FAST-NEXT:    vpermi2d %ymm2, %ymm3, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,21,2,3,22,5,6,23]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm2
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [8,21,10,11,22,13,14,23]
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm10[4],ymm1[4],ymm10[5],ymm1[5],ymm10[6],ymm1[6],ymm10[7],ymm1[7],ymm10[12],ymm1[12],ymm10[13],ymm1[13],ymm10[14],ymm1[14],ymm10[15],ymm1[15]
-; AVX512F-FAST-NEXT:    vpermi2d %zmm1, %zmm4, %zmm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm8, %zmm1
+; AVX512F-FAST-NEXT:    vpermi2d %zmm1, %zmm2, %zmm3
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm8, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 128(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, 64(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
@@ -3277,26 +3283,27 @@ define void @store_i16_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm24
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm23
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,1,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm14[0],ymm2[0],ymm14[1],ymm2[1],ymm14[2],ymm2[2],ymm14[3],ymm2[3],ymm14[8],ymm2[8],ymm14[9],ymm2[9],ymm14[10],ymm2[10],ymm14[11],ymm2[11]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm31
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rcx), %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm21
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm25
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm26
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,2,3,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rcx), %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm15[0],ymm14[0],ymm15[1],ymm14[1],ymm15[2],ymm14[2],ymm15[3],ymm14[3],ymm15[8],ymm14[8],ymm15[9],ymm14[9],ymm15[10],ymm14[10],ymm15[11],ymm14[11]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rcx), %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %ymm11
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm11[0],ymm8[0],ymm11[1],ymm8[1],ymm11[2],ymm8[2],ymm11[3],ymm8[3],ymm11[8],ymm8[8],ymm11[9],ymm8[9],ymm11[10],ymm8[10],ymm11[11],ymm8[11]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[1,0,2,2,5,4,6,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
@@ -3304,451 +3311,461 @@ define void @store_i16_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm12[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm6[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm5, %xmm3, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm22
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm3, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm27
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm1[0,1,2,3],zmm0[0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm24
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm23
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm1[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm28
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm0[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm12[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm19 = ymm0[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm30
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm29
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[1,1,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm11
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm3[0],ymm11[0],ymm3[1],ymm11[1],ymm3[2],ymm11[2],ymm3[3],ymm11[3],ymm3[8],ymm11[8],ymm3[9],ymm11[9],ymm3[10],ymm11[10],ymm3[11],ymm11[11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm0[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm17 = xmm0[1,2,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm0[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm15
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm15[4],xmm5[4],xmm15[5],xmm5[5],xmm15[6],xmm5[6],xmm15[7],xmm5[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm16 = xmm0[1,2,3,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[1,0,2,2,5,4,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm2, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm17[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm13, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm2, %zmm6 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[1,0,2,2,5,4,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm2, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm16[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm7, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm2, %zmm10 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r8), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm2[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm6, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm13[2],ymm7[3,4],ymm13[5],ymm7[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r8), %xmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm5, %xmm13, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5,6],ymm5[7]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm5[0,1,2,3],zmm7[0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm5 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm6 = ymm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm0[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[1],ymm5[1],ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[8],ymm5[8],ymm6[9],ymm5[9],ymm6[10],ymm5[10],ymm6[11],ymm5[11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,2,3,3,5,6,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm11[2,1,2,3,6,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm3[2,1,2,3,6,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm6[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm6[0],ymm1[0],ymm6[1],ymm1[1],ymm6[2],ymm1[2],ymm6[3],ymm1[3],ymm6[8],ymm1[8],ymm6[9],ymm1[9],ymm6[10],ymm1[10],ymm6[11],ymm1[11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm3[4],ymm11[4],ymm3[5],ymm11[5],ymm3[6],ymm11[6],ymm3[7],ymm11[7],ymm3[12],ymm11[12],ymm3[13],ymm11[13],ymm3[14],ymm11[14],ymm3[15],ymm11[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm2[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm10, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0,1],ymm7[2],ymm9[3,4],ymm7[5],ymm9[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r8), %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm7, %xmm13
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0],ymm13[1],ymm10[2,3],ymm13[4],ymm10[5,6],ymm13[7]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm10[0,1,2,3],zmm9[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm9 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm10 = ymm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm0[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm9 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm4[2,1,2,3,6,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm3[2,1,2,3,6,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm10[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm10[0],ymm0[0],ymm10[1],ymm0[1],ymm10[2],ymm0[2],ymm10[3],ymm0[3],ymm10[8],ymm0[8],ymm10[9],ymm0[9],ymm10[10],ymm0[10],ymm10[11],ymm0[11]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r9), %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm0[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,2,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm21 = ymm13[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[1,2,3,3,5,6,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[3,3,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm5, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm9, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm10, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm1, %zmm4 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm2, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm1, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm5[0],ymm3[1],ymm5[2,3],ymm3[4],ymm5[5,6],ymm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm4, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0],ymm3[1],ymm9[2,3],ymm3[4],ymm9[5,6],ymm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r9), %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm3[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm10[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm9
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm11 = zmm1[0,1,2,3],zmm3[0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm14[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm2 = ymm15[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm15[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm15[4],ymm14[4],ymm15[5],ymm14[5],ymm15[6],ymm14[6],ymm15[7],ymm14[7],ymm15[12],ymm14[12],ymm15[13],ymm14[13],ymm15[14],ymm14[14],ymm15[15],ymm14[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[1,2,3,3,5,6,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm9[2,1,2,3,6,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm10[2,1,2,3,6,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm5[0],ymm3[0],ymm5[1],ymm3[1],ymm5[2],ymm3[2],ymm5[3],ymm3[3],ymm5[8],ymm3[8],ymm5[9],ymm3[9],ymm5[10],ymm3[10],ymm5[11],ymm3[11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm4[1,2],ymm2[3],ymm4[4,5],ymm2[6],ymm4[7]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm2[0,1,2,3],zmm9[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm2 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm11[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm4[0],ymm2[0],ymm4[1],ymm2[1],ymm4[2],ymm2[2],ymm4[3],ymm2[3],ymm4[8],ymm2[8],ymm4[9],ymm2[9],ymm4[10],ymm2[10],ymm4[11],ymm2[11]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm11[4],ymm8[4],ymm11[5],ymm8[5],ymm11[6],ymm8[6],ymm11[7],ymm8[7],ymm11[12],ymm8[12],ymm11[13],ymm8[13],ymm11[14],ymm8[14],ymm11[15],ymm8[15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm31[2,1,2,3,6,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm8[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm14[2,1,2,3,6,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm9[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm9 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[8],ymm8[8],ymm9[9],ymm8[9],ymm9[10],ymm8[10],ymm9[11],ymm8[11]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm3[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm8[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,3,6,7,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm3[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm4[1,2,3,3,5,6,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm9[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm9 = ymm14[4],ymm10[4],ymm14[5],ymm10[5],ymm14[6],ymm10[6],ymm14[7],ymm10[7],ymm14[12],ymm10[12],ymm14[13],ymm10[13],ymm14[14],ymm10[14],ymm14[15],ymm10[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm9[3,3,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm2, %zmm3 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm3, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm6[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm10[4],ymm9[4],ymm10[5],ymm9[5],ymm10[6],ymm9[6],ymm10[7],ymm9[7],ymm10[12],ymm9[12],ymm10[13],ymm9[13],ymm10[14],ymm9[14],ymm10[15],ymm9[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[3,3,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm1, %zmm2 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm2, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm12, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm12[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm1[0,1,2,3],zmm0[0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %xmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm12[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm0[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm1, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm23[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm24[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1,2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm2[0,1,2,3],zmm1[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm12[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm2[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm12[2,3,2,3,6,7,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm2[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm15[0],xmm5[0],xmm15[1],xmm5[1],xmm15[2],xmm5[2],xmm15[3],xmm5[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,0,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm15[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm3, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm29[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm30[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
 ; AVX512F-ONLY-SLOW-NEXT:    movw $9362, %ax # imm = 0x2492
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[2,1,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm2 = xmm13[0],zero,xmm13[1],zero,xmm13[2],zero,xmm13[3],zero
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm2, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r9), %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm1[0,1,2,3],zmm0[0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm0[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,2,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm2, %zmm3 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm3, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm7[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0],ymm2[1,2],ymm4[3],ymm2[4,5],ymm4[6],ymm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm4 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm4, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm3[0,1,2,3],zmm2[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm3, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,0,2,2]
 ; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm5, %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm19[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm24[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm23[0,1,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm18[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,7,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm4, %zmm5 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm5, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm8[2,1,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm7[0],ymm4[1,2],ymm7[3],ymm4[4,5],ymm7[6],ymm4[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm7 = xmm22[0],zero,xmm22[1],zero,xmm22[2],zero,xmm22[3],zero
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm7, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1],ymm7[2],ymm5[3,4],ymm7[5],ymm5[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm5[0,1,2,3],zmm4[0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r9), %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm5[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm5[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm8[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm6[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm14, %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0],ymm4[1,2],ymm6[3],ymm4[4,5],ymm6[6],ymm4[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm6 = xmm27[0],zero,xmm27[1],zero,xmm27[2],zero,xmm27[3],zero
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm6, %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1],ymm6[2],ymm5[3,4],ymm6[5],ymm5[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm5[0,1,2,3],zmm4[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm6[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm5, %ymm5
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,2,2,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm12[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm15, %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[0,2,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,4,4,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm10, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm16, %zmm10, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm3, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm17, %zmm10, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm8, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm11, %zmm7, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm13, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm9, %zmm7, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm14, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm7, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm15, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm7, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm18, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm17, %zmm9, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm21, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm20, %zmm9, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm8, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm16, %zmm9, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm11
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm9, %zmm11
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm1, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm1, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 192(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 128(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 320(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 256(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 64(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 192(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 128(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 320(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 256(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 64(%rax)
 ; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
 ; AVX512F-ONLY-SLOW-NEXT:    retq
 ;
 ; AVX512F-ONLY-FAST-LABEL: store_i16_stride6_vf32:
 ; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <17,18,17,18,u,u,19,19,5,4,2,2,5,4,6,6>
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm1, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512F-ONLY-FAST-NEXT:    pushq %rax
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm12[0],ymm1[0],ymm12[1],ymm1[1],ymm12[2],ymm1[2],ymm12[3],ymm1[3],ymm12[8],ymm1[8],ymm12[9],ymm1[9],ymm12[10],ymm1[10],ymm12[11],ymm1[11]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %xmm6
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm30
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = <1,2,1,2,u,u,3,3,13,12,10,10,13,12,14,14>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm0, %zmm20, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,1,1,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %ymm10
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm5[0],ymm3[0],ymm5[1],ymm3[1],ymm5[2],ymm3[2],ymm5[3],ymm3[3],ymm5[8],ymm3[8],ymm5[9],ymm3[9],ymm5[10],ymm3[10],ymm5[11],ymm3[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm17
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm5[0],ymm10[0],ymm5[1],ymm10[1],ymm5[2],ymm10[2],ymm5[3],ymm10[3],ymm5[8],ymm10[8],ymm5[9],ymm10[9],ymm5[10],ymm10[10],ymm5[11],ymm10[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    movw $18724, %ax # imm = 0x4924
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [8,9,20,11,12,21,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm11[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm1, %zmm19, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm23 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [8,9,20,11,12,21,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm9[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm0, %zmm22, %zmm21
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm3, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [0,9,2,3,8,5,6,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm1, %ymm20, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm15[0],ymm1[0],ymm15[1],ymm1[1],ymm15[2],ymm1[2],ymm15[3],ymm1[3],ymm15[8],ymm1[8],ymm15[9],ymm1[9],ymm15[10],ymm1[10],ymm15[11],ymm1[11]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm6, %xmm18
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm17
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm3, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [0,9,2,3,8,5,6,11]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm16, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm28
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm1, %xmm27
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm29 = ymm0[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm5[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm28 = ymm0[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm10[4],xmm6[4],xmm10[5],xmm6[5],xmm10[6],xmm6[6],xmm10[7],xmm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm6, %xmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm1, %zmm4, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm4[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm0[2,2,2,2]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm1[1,1,1,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm15 = ymm1[0],ymm13[0],ymm1[1],ymm13[1],ymm1[2],ymm13[2],ymm1[3],ymm13[3],ymm1[8],ymm13[8],ymm1[9],ymm13[9],ymm1[10],ymm13[10],ymm1[11],ymm13[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm14, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm14, %zmm0 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm14[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm15, %zmm0, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm15, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm12, %ymm20, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm0[0,1,2,3],zmm19[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm12, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm0[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm4[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm21 = ymm9[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm9 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm2 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm8[0],ymm0[0],ymm8[1],ymm0[1],ymm8[2],ymm0[2],ymm8[3],ymm0[3],ymm8[8],ymm0[8],ymm8[9],ymm0[9],ymm8[10],ymm0[10],ymm8[11],ymm0[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [5,6,5,6,5,6,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm9, %ymm8, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm1[4],ymm13[4],ymm1[5],ymm13[5],ymm1[6],ymm13[6],ymm1[7],ymm13[7],ymm1[12],ymm13[12],ymm1[13],ymm13[13],ymm1[14],ymm13[14],ymm1[15],ymm13[15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[3,3,3,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm3, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm13 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [8,21,10,11,20,13,14,23]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm14, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm0, %zmm23, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm14[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [12,1,2,13,4,5,14,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm26, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm3[4],ymm9[4],ymm3[5],ymm9[5],ymm3[6],ymm9[6],ymm3[7],ymm9[7],ymm3[12],ymm9[12],ymm3[13],ymm9[13],ymm3[14],ymm9[14],ymm3[15],ymm9[15]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm0, %ymm8, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm8 = ymm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm9[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm9 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[8],ymm8[8],ymm9[9],ymm8[9],ymm9[10],ymm8[10],ymm9[11],ymm8[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm9[0],ymm2[1],ymm9[1],ymm2[2],ymm9[2],ymm2[3],ymm9[3],ymm2[8],ymm9[8],ymm2[9],ymm9[9],ymm2[10],ymm9[10],ymm2[11],ymm9[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm14[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm14 = ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm4[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm8[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm14[3,3,3,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm4, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm2, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm8 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm0, %zmm8, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm11[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm26, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm5, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm0[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm0[1,1,1,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm0[0],ymm11[0],ymm0[1],ymm11[1],ymm0[2],ymm11[2],ymm0[3],ymm11[3],ymm0[8],ymm11[8],ymm0[9],ymm11[9],ymm0[10],ymm11[10],ymm0[11],ymm11[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm2, %zmm20, %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm19, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm2, %zmm25 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm2[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm8, %zmm25, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm8, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm14, %ymm16, %ymm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm14, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm3[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [5,6,5,6,5,6,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm15[4],ymm1[4],ymm15[5],ymm1[5],ymm15[6],ymm1[6],ymm15[7],ymm1[7],ymm15[12],ymm1[12],ymm15[13],ymm1[13],ymm15[14],ymm1[14],ymm15[15],ymm1[15]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm3, %ymm24, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm15 = ymm15[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm15[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm15[0],ymm1[0],ymm15[1],ymm1[1],ymm15[2],ymm1[2],ymm15[3],ymm1[3],ymm15[8],ymm1[8],ymm15[9],ymm1[9],ymm15[10],ymm1[10],ymm15[11],ymm1[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm1[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm13[0],ymm3[0],ymm13[1],ymm3[1],ymm13[2],ymm3[2],ymm13[3],ymm3[3],ymm13[8],ymm3[8],ymm13[9],ymm3[9],ymm13[10],ymm3[10],ymm13[11],ymm3[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm11[4],ymm0[5],ymm11[5],ymm0[6],ymm11[6],ymm0[7],ymm11[7],ymm0[12],ymm11[12],ymm0[13],ymm11[13],ymm0[14],ymm11[14],ymm0[15],ymm11[15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,3,3,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm15, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm11, %zmm15 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [8,21,10,11,20,13,14,23]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm2, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm3, %zmm16, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm31 = [12,1,2,13,4,5,14,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm2, %ymm31, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm12[4],ymm3[4],ymm12[5],ymm3[5],ymm12[6],ymm3[6],ymm12[7],ymm3[7],ymm12[12],ymm3[12],ymm12[13],ymm3[13],ymm12[14],ymm3[14],ymm12[15],ymm3[15]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm2, %ymm24, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm11[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm24 = ymm13[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm13 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm12 = ymm12[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm12[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm12 = ymm12[0],ymm13[0],ymm12[1],ymm13[1],ymm12[2],ymm13[2],ymm12[3],ymm13[3],ymm12[8],ymm13[8],ymm12[9],ymm13[9],ymm12[10],ymm13[10],ymm12[11],ymm13[11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm10, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm13[0],ymm1[1],ymm13[1],ymm1[2],ymm13[2],ymm1[3],ymm13[3],ymm1[8],ymm13[8],ymm1[9],ymm13[9],ymm1[10],ymm13[10],ymm1[11],ymm13[11]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm5[4],ymm10[4],ymm5[5],ymm10[5],ymm5[6],ymm10[6],ymm5[7],ymm10[7],ymm5[12],ymm10[12],ymm5[13],ymm10[13],ymm5[14],ymm10[14],ymm5[15],ymm10[15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm11, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm10[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[3,3,3,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm12, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm2, %zmm5 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm0, %zmm5, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm11, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm29 = ymm1[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm9[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm1, %ymm31, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm4, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm1[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm16, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm10[0],xmm2[0],xmm10[1],xmm2[1],xmm10[2],xmm2[2],xmm10[3],xmm2[3]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [1,0,2,2,1,0,2,2]
-; AVX512F-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm9, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm17, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
 ; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm10[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm7, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm6, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [1,0,2,2,1,0,2,2]
+; AVX512F-ONLY-FAST-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm13, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm2, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm7, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm6, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm5[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm6[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    movw $9362, %ax # imm = 0x2492
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm2, %zmm1 {%k1}
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [16,9,10,17,12,13,18,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm15[2,1,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm6 = xmm15[0],zero,xmm15[1],zero,xmm15[2],zero,xmm15[3],zero
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm12[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm5, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm12, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm12, %ymm9, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm12 = xmm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm14 = xmm15[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm14[0],xmm12[0],xmm14[1],xmm12[1],xmm14[2],xmm12[2],xmm14[3],xmm12[3]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm12, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm30, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm11, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm15, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm15[0],xmm11[0],xmm15[1],xmm11[1],xmm15[2],xmm11[2],xmm15[3],xmm11[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm9, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm14[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm12, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm9, %zmm4 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm31, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm11[2,1,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm9, %zmm4, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm9 = xmm31[0],zero,xmm31[1],zero,xmm31[2],zero,xmm31[3],zero
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm27, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm11[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm8[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm4, %zmm2, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm4 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm14[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm6, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm10[0],xmm3[0],xmm10[1],xmm3[1],xmm10[2],xmm3[2],xmm10[3],xmm3[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm7, %ymm13, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm14, %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm14 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm10 = xmm10[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm10, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm3, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm11, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm11[0],xmm3[0],xmm11[1],xmm3[1],xmm11[2],xmm3[2],xmm11[3],xmm3[3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm7, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm14[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm10, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm7, %zmm10 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm27, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm3[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm7, %zmm10, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm7 = xmm27[0],zero,xmm27[1],zero,xmm27[2],zero,xmm27[3],zero
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm28, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm3[0,0,2,1,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm12, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm11, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm28, %zmm29, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm14 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm20, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm19, %zmm15, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm22[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm17, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm17, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,2,3],zmm23[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm26, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm8, %zmm17, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,1,8,3,4,9,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm6, %ymm8, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm3[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm7, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm6, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm9, %ymm8, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm4[0,1,2,3],zmm2[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm12, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm6, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 = zmm23[0,1,2,3],zmm21[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm3, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm14, %zmm18, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 = zmm25[0,1,2,3],zmm22[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm20, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm14, %zmm18, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 = zmm15[0,1,2,3],zmm19[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm29, %zmm26, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm14, %zmm15, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm16[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm31, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm15, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,8,3,4,9,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm4, %ymm3, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm9[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm6, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm5, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm7, %ymm3, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm10[0,1,2,3],zmm2[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm12, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm5, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 192(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 192(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 128(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, 320(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 256(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 64(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 320(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 256(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 64(%rax)
+; AVX512F-ONLY-FAST-NEXT:    popq %rax
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
@@ -3757,249 +3774,259 @@ define void @store_i16_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vmovdqa (%rcx), %xmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm5
 ; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm18
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm19
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm23
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm24
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,2,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rcx), %ymm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %ymm10
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11]
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rcx), %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %ymm14
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm14[0],ymm2[0],ymm14[1],ymm2[1],ymm14[2],ymm2[2],ymm14[3],ymm2[3],ymm14[8],ymm2[8],ymm14[9],ymm2[9],ymm14[10],ymm2[10],ymm14[11],ymm2[11]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm31
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[1,0,2,2,5,4,6,6]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa (%rsi), %xmm3
 ; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm22
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm21
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm28
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm27
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rsi), %ymm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm15
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm15[0],ymm14[0],ymm15[1],ymm14[1],ymm15[2],ymm14[2],ymm15[3],ymm14[3],ymm15[8],ymm14[8],ymm15[9],ymm14[9],ymm15[10],ymm14[10],ymm15[11],ymm14[11]
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rsi), %ymm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm11
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm11[0],ymm8[0],ymm11[1],ymm8[1],ymm11[2],ymm8[2],ymm11[3],ymm8[3],ymm11[8],ymm8[8],ymm11[9],ymm8[9],ymm11[10],ymm8[10],ymm11[11],ymm8[11]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
 ; AVX512DQ-SLOW-NEXT:    movw $18724, %ax # imm = 0x4924
 ; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm2, %zmm1, %zmm0 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %ymm12
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm12[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %ymm6
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm6[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %xmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm5, %xmm3, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm20
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm13, %xmm3, %xmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm25
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm0[0,1,2,3],zmm1[0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm24
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm23
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm0[0,1,2,3],zmm1[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %xmm1
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm26
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm0[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %ymm12
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm12[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm19 = ymm0[2,2,2,2]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm30
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm29
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,2,3,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm11
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm3
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm3[0],ymm11[0],ymm3[1],ymm11[1],ymm3[2],ymm11[2],ymm3[3],ymm11[3],ymm3[8],ymm11[8],ymm3[9],ymm11[9],ymm3[10],ymm11[10],ymm3[11],ymm11[11]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm0[1,0,2,2,5,4,6,6]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm0[1,1,1,1]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm0[1,0,2,2,5,4,6,6]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm15
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm15[4],xmm5[4],xmm15[5],xmm5[5],xmm15[6],xmm5[6],xmm15[7],xmm5[7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm0[1,1,1,1]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm0
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm2, %zmm13
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm6, %zmm17, %zmm13 {%k1}
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm2, %zmm7
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm10, %zmm16, %zmm7 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r8), %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm2[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm13, %ymm7
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm6[2],ymm7[3,4],ymm6[5],ymm7[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r8), %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm5, %xmm6, %xmm5
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm13[0],ymm5[1],ymm13[2,3],ymm5[4],ymm13[5,6],ymm5[7]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm5[0,1,2,3],zmm7[0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm1[2,1,2,3,6,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm0[2,1,2,3,6,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm2[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,2]
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm9 = ymm9[0,1],ymm10[2],ymm9[3,4],ymm10[5],ymm9[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r8), %xmm10
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm13, %xmm10, %xmm13
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0],ymm13[1],ymm7[2,3],ymm13[4],ymm7[5,6],ymm13[7]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm7[0,1,2,3],zmm9[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm1[2,1,2,3,6,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm7[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm7[0],ymm5[0],ymm7[1],ymm5[1],ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[8],ymm5[8],ymm7[9],ymm5[9],ymm7[10],ymm5[10],ymm7[11],ymm5[11]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,3,3,3]
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm11[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm7 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm7[0],ymm1[0],ymm7[1],ymm1[1],ymm7[2],ymm1[2],ymm7[3],ymm1[3],ymm7[8],ymm1[8],ymm7[9],ymm1[9],ymm7[10],ymm1[10],ymm7[11],ymm1[11]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm3[4],ymm11[4],ymm3[5],ymm11[5],ymm3[6],ymm11[6],ymm3[7],ymm11[7],ymm3[12],ymm11[12],ymm3[13],ymm11[13],ymm3[14],ymm11[14],ymm3[15],ymm11[15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm0[2,1,2,3,6,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm9[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm9[0],ymm7[0],ymm9[1],ymm7[1],ymm9[2],ymm7[2],ymm9[3],ymm7[3],ymm9[8],ymm7[8],ymm9[9],ymm7[9],ymm9[10],ymm7[10],ymm9[11],ymm7[11]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm9 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm9 = ymm9[0],ymm0[0],ymm9[1],ymm0[1],ymm9[2],ymm0[2],ymm9[3],ymm0[3],ymm9[8],ymm0[8],ymm9[9],ymm0[9],ymm9[10],ymm0[10],ymm9[11],ymm0[11]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r9), %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm0[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,2,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm21 = ymm13[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[3,3,3,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[1,2,3,3,5,6,7,7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm5, %zmm5
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm3, %zmm1, %zmm5 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm5, %ymm3
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm7, %zmm4
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm3, %zmm9, %zmm4 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm1, %ymm2, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm4, %ymm7
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm7[0],ymm3[1],ymm7[2,3],ymm3[4],ymm7[5,6],ymm3[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r9), %ymm7
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm7[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm9[2,2,2,2]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm5[1,2],ymm2[3],ymm5[4,5],ymm2[6],ymm5[7]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm11 = zmm2[0,1,2,3],zmm1[0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm14[2,1,2,3,6,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm15[2,1,2,3,6,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm4[1,2],ymm2[3],ymm4[4,5],ymm2[6],ymm4[7]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm2[0,1,2,3],zmm3[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm8[2,1,2,3,6,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm11[2,1,2,3,6,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm11[4],ymm8[4],ymm11[5],ymm8[5],ymm11[6],ymm8[6],ymm11[7],ymm8[7],ymm11[12],ymm8[12],ymm11[13],ymm8[13],ymm11[14],ymm8[14],ymm11[15],ymm8[15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm9
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm9[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm8 = ymm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm14[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm8[0],ymm4[0],ymm8[1],ymm4[1],ymm8[2],ymm4[2],ymm8[3],ymm4[3],ymm8[8],ymm4[8],ymm8[9],ymm4[9],ymm8[10],ymm4[10],ymm8[11],ymm4[11]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm7[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm8[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,3,2,3,6,7,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm7[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm7[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[3,3,3,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm7 = ymm14[4],ymm9[4],ymm14[5],ymm9[5],ymm14[6],ymm9[6],ymm14[7],ymm9[7],ymm14[12],ymm9[12],ymm14[13],ymm9[13],ymm14[14],ymm9[14],ymm14[15],ymm9[15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[1,2,3,3,5,6,7,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm7[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm3, %zmm4, %zmm2 {%k1}
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm15[4],ymm14[4],ymm15[5],ymm14[5],ymm15[6],ymm14[6],ymm15[7],ymm14[7],ymm15[12],ymm14[12],ymm15[13],ymm14[13],ymm15[14],ymm14[14],ymm15[15],ymm14[15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[3,3,3,3]
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm9[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm5 = ymm10[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm10[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm5[0],ymm3[0],ymm5[1],ymm3[1],ymm5[2],ymm3[2],ymm5[3],ymm3[3],ymm5[8],ymm3[8],ymm5[9],ymm3[9],ymm5[10],ymm3[10],ymm5[11],ymm3[11]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm10[4],ymm9[4],ymm10[5],ymm9[5],ymm10[6],ymm9[6],ymm10[7],ymm9[7],ymm10[12],ymm9[12],ymm10[13],ymm9[13],ymm10[14],ymm9[14],ymm10[15],ymm9[15]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm5[1,2,3,3,5,6,7,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm2, %zmm3, %zmm1 {%k1}
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm12, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm12[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm1[0,1,2,3],zmm0[0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %xmm12
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm12[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm0[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm8[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm4[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm4
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,0,2,2]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm2[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm2, %ymm2
-; AVX512DQ-SLOW-NEXT:    movw $9362, %ax # imm = 0x2492
-; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm2, %zmm1, %zmm0 {%k1}
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[2,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6],ymm1[7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm6[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm2[0,1,2,3],zmm1[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm12[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm2[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm12[2,3,2,3,6,7,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm2[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm15[0],xmm5[0],xmm15[1],xmm5[1],xmm15[2],xmm5[2],xmm15[3],xmm5[3]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7]
-; AVX512DQ-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm2 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm2, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r9), %xmm6
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm0[0,1,2,3],zmm1[0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm0[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm6[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,2,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm15[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,5]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm7
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,0,2,2]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm4[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm4, %ymm4
+; AVX512DQ-SLOW-NEXT:    movw $9362, %ax # imm = 0x2492
+; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm4, %zmm3, %zmm2 {%k1}
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm2, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm10[2,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm4 = xmm10[0],zero,xmm10[1],zero,xmm10[2],zero,xmm10[3],zero
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm4, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm3[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[0,0,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm3, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm22[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm28[0,1,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm21[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,5]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm27[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,5]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm13
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm8[0],xmm13[0],xmm8[1],xmm13[1],xmm8[2],xmm13[2],xmm8[3],xmm13[3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,0,2,2]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm9
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,0,2,2]
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm7[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm7, %ymm7
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm7, %zmm5, %zmm4 {%k1}
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm6[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm6, %ymm6
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm6, %zmm5, %zmm4 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm4, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm8
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm8[2,1,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm7[0],ymm5[1,2],ymm7[3],ymm5[4,5],ymm7[6],ymm5[7]
-; AVX512DQ-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm7 = xmm20[0],zero,xmm20[1],zero,xmm20[2],zero,xmm20[3],zero
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm7, %ymm7
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm7[2],ymm4[3,4],ymm7[5],ymm4[6,7]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm5[0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r9), %ymm5
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm5[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm5[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm8[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm6[0,0,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm14, %ymm14
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm7
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[2,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0],ymm5[1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512DQ-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm6 = xmm25[0],zero,xmm25[1],zero,xmm25[2],zero,xmm25[3],zero
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm6, %ymm6
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm5[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm6
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm6[0,0,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm5, %ymm5
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,2,2,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm12[0,0,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm15, %ymm15
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[0,2,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,4,4,4]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,2,1]
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm10, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm16, %zmm10, %zmm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm3, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm17, %zmm10, %zmm3
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm8, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm11, %zmm7, %zmm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm13, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm9, %zmm7, %zmm0
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm14, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm7, %zmm6
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm15, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm7, %zmm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm18, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm17, %zmm9, %zmm7
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm21, %zmm10
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm20, %zmm9, %zmm10
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm8, %zmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm16, %zmm9, %zmm8
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm11
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm9, %zmm11
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm1, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm1, %zmm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, (%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, 192(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, 128(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, 320(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, 256(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, 64(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, 192(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, 128(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, 320(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, 256(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, 64(%rax)
 ; AVX512DQ-SLOW-NEXT:    vzeroupper
 ; AVX512DQ-SLOW-NEXT:    retq
 ;
 ; AVX512DQ-FAST-LABEL: store_i16_stride6_vf32:
 ; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $104, %rsp
+; AVX512DQ-FAST-NEXT:    subq $40, %rsp
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, (%rsp) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm4
@@ -4009,214 +4036,211 @@ define void @store_i16_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[3,3,3,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm25
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %ymm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm22
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [5,6,5,6,5,6,7,7]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm1, %ymm4, %ymm1
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm2 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %ymm5
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm5[4],ymm3[4],ymm5[5],ymm3[5],ymm5[6],ymm3[6],ymm5[7],ymm3[7],ymm5[12],ymm3[12],ymm5[13],ymm3[13],ymm5[14],ymm3[14],ymm5[15],ymm3[15]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [5,6,5,6,5,6,7,7]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm1, %ymm2, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm5[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm29
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
 ; AVX512DQ-FAST-NEXT:    movw $18724, %ax # imm = 0x4924
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm2, %zmm25 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [8,21,10,11,20,13,14,23]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %ymm3
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm3, %zmm22 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [8,21,10,11,20,13,14,23]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %ymm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm3, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm23
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm2, %zmm24, %zmm23
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm3[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm31
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [12,1,2,13,4,5,14,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm2, %ymm5, %ymm25
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %ymm7
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm7, %ymm3
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm7, %ymm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm30
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %ymm7
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm6
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm9
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm0
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm6[0],ymm0[1],ymm6[1],ymm0[2],ymm6[2],ymm0[3],ymm6[3],ymm0[8],ymm6[8],ymm0[9],ymm6[9],ymm0[10],ymm6[10],ymm0[11],ymm6[11]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, %zmm20
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm3, %zmm21, %zmm20
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm4[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm31
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [12,1,2,13,4,5,14,7]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm3, %ymm9, %ymm22
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm4
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm5, %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm17
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm4[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %ymm5
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm15
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm15, %ymm0
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[1],ymm4[1],ymm0[2],ymm4[2],ymm0[3],ymm4[3],ymm0[8],ymm4[8],ymm0[9],ymm4[9],ymm0[10],ymm4[10],ymm0[11],ymm4[11]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm9[4],ymm7[4],ymm9[5],ymm7[5],ymm9[6],ymm7[6],ymm9[7],ymm7[7],ymm9[12],ymm7[12],ymm9[13],ymm7[13],ymm9[14],ymm7[14],ymm9[15],ymm7[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm17
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[3,3,3,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %ymm8
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %ymm6
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm6[4],ymm8[4],ymm6[5],ymm8[5],ymm6[6],ymm8[6],ymm6[7],ymm8[7],ymm6[12],ymm8[12],ymm6[13],ymm8[13],ymm6[14],ymm8[14],ymm6[15],ymm8[15]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm0, %ymm4, %ymm0
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm7 = ymm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm6[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm7[0],ymm4[0],ymm7[1],ymm4[1],ymm7[2],ymm4[2],ymm7[3],ymm4[3],ymm7[8],ymm4[8],ymm7[9],ymm4[9],ymm7[10],ymm4[10],ymm7[11],ymm4[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm4, %zmm28 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm13
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm13, %ymm0
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm0, %zmm28, %zmm24
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm13[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm5, %ymm28
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm0
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm29 = ymm0[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm21 = ymm0[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %xmm10
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm10, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm12
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm12, %xmm2
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %xmm7
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm15[4],ymm5[4],ymm15[5],ymm5[5],ymm15[6],ymm5[6],ymm15[7],ymm5[7],ymm15[12],ymm5[12],ymm15[13],ymm5[13],ymm15[14],ymm5[14],ymm15[15],ymm5[15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm16
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[3,3,3,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm23
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %ymm13
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %ymm4
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm4[4],ymm13[4],ymm4[5],ymm13[5],ymm4[6],ymm13[6],ymm4[7],ymm13[7],ymm4[12],ymm13[12],ymm4[13],ymm13[13],ymm4[14],ymm13[14],ymm4[15],ymm13[15]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm0, %ymm2, %ymm0
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm2 = ymm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm13[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm10 = ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm10[0],ymm2[0],ymm10[1],ymm2[1],ymm10[2],ymm2[2],ymm10[3],ymm2[3],ymm10[8],ymm2[8],ymm10[9],ymm2[9],ymm10[10],ymm2[10],ymm10[11],ymm2[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm2, %zmm23 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm10
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm10, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm0, %zmm23, %zmm21
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm10[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm9, %ymm23
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm9
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm9, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm18 = ymm0[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm9, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm0[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %xmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %xmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm4[0],ymm13[0],ymm4[1],ymm13[1],ymm4[2],ymm13[2],ymm4[3],ymm13[3],ymm4[8],ymm13[8],ymm4[9],ymm13[9],ymm4[10],ymm13[10],ymm4[11],ymm13[11]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm24
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm12, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm13
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm13, %xmm4
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm0[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm0[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %xmm11
 ; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm26 = [1,0,2,2,1,0,2,2]
 ; AVX512DQ-FAST-NEXT:    # ymm26 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %xmm4
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm3, %ymm26, %ymm3
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm11 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm11[0],xmm5[0],xmm11[1],xmm5[1],xmm11[2],xmm5[2],xmm11[3],xmm5[3]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm5, %ymm5
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm18
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %xmm2
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm5, %ymm26, %ymm5
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm25, %zmm7
 ; AVX512DQ-FAST-NEXT:    movw $9362, %ax # imm = 0x2492
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k2
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm5, %zmm3, %zmm18 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [16,9,10,17,12,13,18,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %xmm2
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[2,1,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm2, %xmm16
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm1, %zmm20, %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm11
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm11, %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm27 = ymm0[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm14, %ymm26, %ymm14
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm15 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm15[0],xmm2[0],xmm15[1],xmm2[1],xmm15[2],xmm2[2],xmm15[3],xmm2[3]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm11[0],xmm3[0],xmm11[1],xmm3[1],xmm11[2],xmm3[2],xmm11[3],xmm3[3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm2, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm27, %zmm15, %zmm26
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm2, %zmm14, %zmm26 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %xmm2
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm2[2,1,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm14, %zmm26, %zmm20
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %xmm0
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm0[0,0,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm14, %ymm27
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm6[0],ymm8[0],ymm6[1],ymm8[1],ymm6[2],ymm8[2],ymm6[3],ymm8[3],ymm6[8],ymm8[8],ymm6[9],ymm8[9],ymm6[10],ymm8[10],ymm6[11],ymm8[11]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <17,18,17,18,u,u,19,19,5,4,2,2,5,4,6,6>
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm1, %zmm8, %zmm6
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm11[4],xmm3[4],xmm11[5],xmm3[5],xmm11[6],xmm3[6],xmm11[7],xmm3[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm3
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm9[0],ymm3[0],ymm9[1],ymm3[1],ymm9[2],ymm3[2],ymm9[3],ymm3[3],ymm9[8],ymm3[8],ymm9[9],ymm3[9],ymm9[10],ymm3[10],ymm9[11],ymm3[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm3, %zmm1, %zmm6 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [8,9,20,11,12,21,14,15]
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm13[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, %zmm9
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm3, %zmm1, %zmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm7
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm11 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm11 = ymm5[0],mem[0],ymm5[1],mem[1],ymm5[2],mem[2],ymm5[3],mem[3],ymm5[8],mem[8],ymm5[9],mem[9],ymm5[10],mem[10],ymm5[11],mem[11]
-; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm13 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm4, %zmm8, %zmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,9,2,3,8,5,6,11]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm2, %ymm8, %ymm6
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %xmm2
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm9[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm2[0,0,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm9, %ymm9
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm3
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm10 = xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm5
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm5[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[1,1,1,1]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd (%rsp), %ymm5, %ymm15 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm15 = ymm5[0],mem[0],ymm5[1],mem[1],ymm5[2],mem[2],ymm5[3],mem[3],ymm5[8],mem[8],ymm5[9],mem[9],ymm5[10],mem[10],ymm5[11],mem[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm15, %zmm10, %zmm11 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm5
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm5[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm10, %zmm11, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm16, %xmm5
-; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm10 = xmm16[0],zero,xmm16[1],zero,xmm16[2],zero,xmm16[3],zero
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm5, %xmm4
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm4, %ymm8, %ymm11
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm11[0,1,2,3],zmm1[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm4
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm4[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm25[0,1,2,3],zmm23[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm11, %zmm8
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm28[0,1,2,3],zmm24[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm29, %zmm12
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm11, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,1,8,3,4,9,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm10, %ymm5, %ymm18
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm13, %ymm5, %ymm26
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm18[0,1,2,3],zmm22[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm27, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm10, %zmm7
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm26[0,1,2,3],zmm20[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm9, %zmm3
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm10, %zmm3
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm2, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm5, %zmm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm5, %zmm0
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm5, %zmm7 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [16,9,10,17,12,13,18,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm25
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %xmm4
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm4[2,1,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm0, %zmm27, %zmm25
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm5, %xmm6
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm6, %ymm26, %ymm26
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %xmm6
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm6[0,0,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm14, %ymm28
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm3, %ymm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm3, %zmm26, %zmm1 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %xmm3
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm3[2,1,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm8, %zmm1, %zmm27
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,1,1,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm5
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm15[0],ymm5[0],ymm15[1],ymm5[1],ymm15[2],ymm5[2],ymm15[3],ymm5[3],ymm15[8],ymm5[8],ymm15[9],ymm5[9],ymm15[10],ymm5[10],ymm15[11],ymm5[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <1,2,1,2,u,u,3,3,13,12,10,10,13,12,14,14>
+; AVX512DQ-FAST-NEXT:    vpermd %zmm24, %zmm8, %zmm15
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm5, %zmm0, %zmm15 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [8,9,20,11,12,21,14,15]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm10[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm10
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm0, %zmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm14
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm5 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm5 = ymm14[0],mem[0],ymm14[1],mem[1],ymm14[2],mem[2],ymm14[3],mem[3],ymm14[8],mem[8],ymm14[9],mem[9],ymm14[10],mem[10],ymm14[11],mem[11]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm3, %xmm14
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [0,9,2,3,8,5,6,11]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm14, %ymm24, %ymm15
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm6, %xmm14
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm14[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX512DQ-FAST-NEXT:    vpermd %zmm2, %zmm8, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %xmm8
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm8[0,0,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm13, %ymm29
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm8, %xmm11
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm14 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm14 = ymm13[0],mem[0],ymm13[1],mem[1],ymm13[2],mem[2],ymm13[3],mem[3],ymm13[8],mem[8],ymm13[9],mem[9],ymm13[10],mem[10],ymm13[11],mem[11]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm8, %xmm8
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm9[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[1,1,1,1]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm14, %zmm12, %zmm2 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm12
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm12[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm12, %zmm2, %zmm0
+; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm12 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm4, %ymm24, %ymm2
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm6, %xmm4
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm5
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm22[0,1,2,3],zmm20[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm13, %zmm13
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm14, %zmm13
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm23[0,1,2,3],zmm21[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm18, %zmm16
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm14, %zmm16
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,1,8,3,4,9,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm12, %ymm6, %ymm7
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm3, %ymm6, %ymm1
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm7[0,1,2,3],zmm25[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm26, %zmm28, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm7, %zmm6
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm27[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm29, %zmm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm7, %zmm3
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm15[0,1,2,3],zmm10[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm7
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm8, %zmm7
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm0[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm1
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm8, %zmm1
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, 256(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 256(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 64(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 192(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, 128(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, 320(%rax)
-; AVX512DQ-FAST-NEXT:    addq $104, %rsp
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 192(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, 128(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, 320(%rax)
+; AVX512DQ-FAST-NEXT:    addq $40, %rsp
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;
@@ -7444,7 +7468,7 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-ONLY-SLOW-LABEL: store_i16_stride6_vf64:
 ; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    subq $264, %rsp # imm = 0x108
+; AVX512F-ONLY-SLOW-NEXT:    subq $408, %rsp # imm = 0x198
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
@@ -7452,22 +7476,24 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,1,1,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rcx), %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rcx), %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,2,3,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rcx), %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %ymm12
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm12[0],ymm11[0],ymm12[1],ymm11[1],ymm12[2],ymm11[2],ymm12[3],ymm11[3],ymm12[8],ymm11[8],ymm12[9],ymm11[9],ymm12[10],ymm11[10],ymm12[11],ymm11[11]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rcx), %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm31
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[1,0,2,2,5,4,6,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
@@ -7475,23 +7501,32 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm3[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm19
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm4, %xmm3, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm4, %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm31
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 (%r9), %xmm19
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm19[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rsi), %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm10[4],xmm6[5],xmm10[5],xmm6[6],xmm10[6],xmm6[7],xmm10[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm0[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rsi), %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm15
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm15[4],xmm9[4],xmm15[5],xmm9[5],xmm15[6],xmm9[6],xmm15[7],xmm9[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[1,1,1,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rsi), %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm1
@@ -7499,950 +7534,993 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm3[2,2,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rcx), %xmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdx), %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm13 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm17 = xmm13[1,2,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rcx), %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdx), %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm15 = ymm14[0],ymm13[0],ymm14[1],ymm13[1],ymm14[2],ymm13[2],ymm14[3],ymm13[3],ymm14[8],ymm13[8],ymm14[9],ymm13[9],ymm14[10],ymm13[10],ymm14[11],ymm13[11]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[1,0,2,2,5,4,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,2,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rcx), %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%rdx), %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm8[0],ymm7[0],ymm8[1],ymm7[1],ymm8[2],ymm7[2],ymm8[3],ymm7[3],ymm8[8],ymm7[8],ymm8[9],ymm7[9],ymm8[10],ymm7[10],ymm8[11],ymm7[11]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[1,0,2,2,5,4,6,6]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm17[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm5, %zmm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm2, %zmm15 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm6[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm10[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm2, %zmm5 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%r8), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm2[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm15, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm5[2],ymm7[3,4],ymm5[5],ymm7[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%r8), %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm5, %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm17
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm15[0],ymm8[1],ymm15[2,3],ymm8[4],ymm15[5,6],ymm8[7]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm8[0,1,2,3],zmm7[0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm7 = ymm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm13[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm8 = ymm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm14[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm8[0],ymm7[0],ymm8[1],ymm7[1],ymm8[2],ymm7[2],ymm8[3],ymm7[3],ymm8[8],ymm7[8],ymm8[9],ymm7[9],ymm8[10],ymm7[10],ymm8[11],ymm7[11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm14[4],ymm13[4],ymm14[5],ymm13[5],ymm14[6],ymm13[6],ymm14[7],ymm13[7],ymm14[12],ymm13[12],ymm14[13],ymm13[13],ymm14[14],ymm13[14],ymm14[15],ymm13[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[1,2,3,3,5,6,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm0[2,1,2,3,6,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm13[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm1[2,1,2,3,6,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm14[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm13 = ymm14[0],ymm13[0],ymm14[1],ymm13[1],ymm14[2],ymm13[2],ymm14[3],ymm13[3],ymm14[8],ymm13[8],ymm14[9],ymm13[9],ymm14[10],ymm13[10],ymm14[11],ymm13[11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm2[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm5, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1],ymm6[2],ymm10[3,4],ymm6[5],ymm10[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%r8), %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm6, %xmm12
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2,3],ymm12[4],ymm5[5,6],ymm12[7]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm5[0,1,2,3],zmm10[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%r9), %xmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm10[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm10, %xmm11
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,2,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm5[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%r9), %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm5[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm10[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm10 = ymm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm7[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm12 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm12[0],ymm10[0],ymm12[1],ymm10[1],ymm12[2],ymm10[2],ymm12[3],ymm10[3],ymm12[8],ymm10[8],ymm12[9],ymm10[9],ymm12[10],ymm10[10],ymm12[11],ymm10[11]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm7 = ymm8[4],ymm7[4],ymm8[5],ymm7[5],ymm8[6],ymm7[6],ymm8[7],ymm7[7],ymm8[12],ymm7[12],ymm8[13],ymm7[13],ymm8[14],ymm7[14],ymm8[15],ymm7[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[1,2,3,3,5,6,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm0[2,1,2,3,6,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm8[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm1[2,1,2,3,6,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm12[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm12[0],ymm8[0],ymm12[1],ymm8[1],ymm12[2],ymm8[2],ymm12[3],ymm8[3],ymm12[8],ymm8[8],ymm12[9],ymm8[9],ymm12[10],ymm8[10],ymm12[11],ymm8[11]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,3,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm8[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm7, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm13, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm7[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm10, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm7, %ymm2, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm7, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm7, %ymm14
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm7
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0],ymm1[1],ymm7[2,3],ymm1[4],ymm7[5,6],ymm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm13[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm14[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm5[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm5[2,3,2,3,6,7,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm7[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm14[4],ymm13[4],ymm14[5],ymm13[5],ymm14[6],ymm13[6],ymm14[7],ymm13[7],ymm14[12],ymm13[12],ymm14[13],ymm13[13],ymm14[14],ymm13[14],ymm14[15],ymm13[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm0[1,2,3,3,5,6,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm7[4],ymm8[4],ymm7[5],ymm8[5],ymm7[6],ymm8[6],ymm7[7],ymm8[7],ymm7[12],ymm8[12],ymm7[13],ymm8[13],ymm7[14],ymm8[14],ymm7[15],ymm8[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm0[1,2,3,3,5,6,7,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rsi), %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm2[2,1,2,3,6,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm0[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm0[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm0[2,1,2,3,6,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm15[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm15[0],ymm8[0],ymm15[1],ymm8[1],ymm15[2],ymm8[2],ymm15[3],ymm8[3],ymm15[8],ymm8[8],ymm15[9],ymm8[9],ymm15[10],ymm8[10],ymm15[11],ymm8[11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm15 = ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[3,3,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm8, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm1, %zmm7 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm0[2,1,2,3,6,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm12[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm12[0],ymm10[0],ymm12[1],ymm10[1],ymm12[2],ymm10[2],ymm12[3],ymm10[3],ymm12[8],ymm10[8],ymm12[9],ymm10[9],ymm12[10],ymm10[10],ymm12[11],ymm10[11]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm12 = ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[3,3,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm10, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm1, %zmm5 {%k1}
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r8), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm9, %ymm1, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm20
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm7, %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm15[0],ymm8[1],ymm15[2,3],ymm8[4],ymm15[5,6],ymm8[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm1[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm15[0],ymm7[1,2],ymm15[3],ymm7[4,5],ymm15[6],ymm7[7]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm8[0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, (%rsp) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,0,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm1, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm17
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm5, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm12[0],ymm10[1],ymm12[2,3],ymm10[4],ymm12[5,6],ymm10[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm1[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm12[0],ymm5[1,2],ymm12[3],ymm5[4,5],ymm12[6],ymm5[7]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm28 = zmm5[0,1,2,3],zmm10[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r9), %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm5[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm10[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm5[2,3,2,3,6,7,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm10[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm10[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[1,0,2,2]
 ; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm3, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm10[0],xmm6[1],xmm10[1],xmm6[2],xmm10[2],xmm6[3],xmm10[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm15[0],xmm9[0],xmm15[1],xmm9[1],xmm15[2],xmm9[2],xmm15[3],xmm9[3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm10[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,7,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm7, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm9[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,7,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm15[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,7,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm12[4],xmm9[4],xmm12[5],xmm9[5],xmm12[6],xmm9[6],xmm12[7],xmm9[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm10, %zmm3
 ; AVX512F-ONLY-SLOW-NEXT:    movw $9362, %ax # imm = 0x2492
 ; AVX512F-ONLY-SLOW-NEXT:    kmovw %eax, %k2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm4, %zmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm3, %zmm4 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm5[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm6[2,1,3,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm4, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm6[1,2],ymm3[3],ymm6[4,5],ymm3[6],ymm6[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm5, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm4[0,1,2,3],zmm3[0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm4, %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0],ymm9[1,2],ymm3[3],ymm9[4,5],ymm3[6],ymm9[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm6, %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm6[2],ymm4[3,4],ymm6[5],ymm4[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm26 = zmm4[0,1,2,3],zmm3[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm11[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm11, %xmm16
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm3, %ymm27
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rcx), %xmm3
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,0,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm6, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rsi), %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm10[0],xmm6[0],xmm10[1],xmm6[1],xmm10[2],xmm6[2],xmm10[3],xmm6[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm6[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,7,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm10[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,7,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm5, %zmm7 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r8), %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm5[2,1,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0],ymm9[1,2],ymm8[3],ymm9[4,5],ymm8[6],ymm9[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm9 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm9, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm9[2],ymm7[3,4],ymm9[5],ymm7[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm7[0,1,2,3],zmm8[0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm10[4],xmm6[4],xmm10[5],xmm6[5],xmm10[6],xmm6[6],xmm10[7],xmm6[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[1,1,1,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,0,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm9 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm10 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm9, %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rsi), %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm15
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm15[0],xmm9[0],xmm15[1],xmm9[1],xmm15[2],xmm9[2],xmm15[3],xmm9[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm9[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,7,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm15[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,7,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm11 = xmm11[4],xmm14[4],xmm11[5],xmm14[5],xmm11[6],xmm14[6],xmm11[7],xmm14[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm6, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm12, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm6, %zmm10 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r8), %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm6[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm10, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0],ymm12[1,2],ymm11[3],ymm12[4,5],ymm11[6],ymm12[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm11
+; AVX512F-ONLY-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm12 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm12, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm10[0,1],ymm12[2],ymm10[3,4],ymm12[5],ymm10[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm29 = zmm10[0,1,2,3],zmm11[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm15[4],xmm9[4],xmm15[5],xmm9[5],xmm15[6],xmm9[6],xmm15[7],xmm9[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[1,1,1,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,2,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm6, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm14[0],ymm13[0],ymm14[1],ymm13[1],ymm14[2],ymm13[2],ymm14[3],ymm13[3],ymm14[8],ymm13[8],ymm14[9],ymm13[9],ymm14[10],ymm13[10],ymm14[11],ymm13[11]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm7[0],ymm8[0],ymm7[1],ymm8[1],ymm7[2],ymm8[2],ymm7[3],ymm8[3],ymm7[8],ymm8[8],ymm7[9],ymm8[9],ymm7[10],ymm8[10],ymm7[11],ymm8[11]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[1,0,2,2,5,4,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm9, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm2 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm2, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm5, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm1[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm2, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm6, %xmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6],ymm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm1[0,1,2,3],zmm0[0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm10[4],xmm3[4],xmm10[5],xmm3[5],xmm10[6],xmm3[6],xmm10[7],xmm3[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm17
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[1,1,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm13
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm13[4],xmm5[5],xmm13[5],xmm5[6],xmm13[6],xmm5[7],xmm13[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[1,2,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm14[0],ymm4[0],ymm14[1],ymm4[1],ymm14[2],ymm4[2],ymm14[3],ymm4[3],ymm14[8],ymm4[8],ymm14[9],ymm4[9],ymm14[10],ymm4[10],ymm14[11],ymm4[11]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[1,0,2,2,5,4,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm7[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm8[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm3, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm1, %zmm7 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r8), %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm3[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm1[2],ymm8[3,4],ymm1[5],ymm8[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r8), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm9, %xmm1, %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0],ymm9[1],ymm7[2,3],ymm9[4],ymm7[5,6],ymm9[7]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm7[0,1,2,3],zmm8[0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm7 = ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm8 = ymm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm14[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm8[0],ymm7[0],ymm8[1],ymm7[1],ymm8[2],ymm7[2],ymm8[3],ymm7[3],ymm8[8],ymm7[8],ymm8[9],ymm7[9],ymm8[10],ymm7[10],ymm8[11],ymm7[11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm14[4],ymm4[4],ymm14[5],ymm4[5],ymm14[6],ymm4[6],ymm14[7],ymm4[7],ymm14[12],ymm4[12],ymm14[13],ymm4[13],ymm14[14],ymm4[14],ymm14[15],ymm4[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[1,2,3,3,5,6,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm0[2,1,2,3,6,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm8[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm2[2,1,2,3,6,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm9[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[8],ymm8[8],ymm9[9],ymm8[9],ymm9[10],ymm8[10],ymm9[11],ymm8[11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,3,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm4[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm7, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm2, %zmm0 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm6, %ymm3, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2,3],ymm2[4],ymm4[5,6],ymm2[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm0[1,2],ymm3[3],ymm0[4,5],ymm3[6],ymm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm0[0,1,2,3],zmm2[0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %xmm14
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm14[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm30 = zmm1[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r9), %xmm15
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm15[2,3,2,3]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm26 = ymm0[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm0[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm2[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%r9), %xmm15
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm15[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,2,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm2[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm7 = ymm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm11[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm8 = ymm12[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm12[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm8[0],ymm7[0],ymm8[1],ymm7[1],ymm8[2],ymm7[2],ymm8[3],ymm7[3],ymm8[8],ymm7[8],ymm8[9],ymm7[9],ymm8[10],ymm7[10],ymm8[11],ymm7[11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm12[4],ymm11[4],ymm12[5],ymm11[5],ymm12[6],ymm11[6],ymm12[7],ymm11[7],ymm12[12],ymm11[12],ymm12[13],ymm11[13],ymm12[14],ymm11[14],ymm12[15],ymm11[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[1,2,3,3,5,6,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm18[2,1,2,3,6,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm9[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm16[2,1,2,3,6,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm11[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm9 = ymm11[0],ymm9[0],ymm11[1],ymm9[1],ymm11[2],ymm9[2],ymm11[3],ymm9[3],ymm11[8],ymm9[8],ymm11[9],ymm9[9],ymm11[10],ymm9[10],ymm11[11],ymm9[11]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm11 = ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm11[3,3,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm7, %zmm8 {%k1}
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm6, %ymm2, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm8, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0],ymm7[1],ymm9[2,3],ymm7[4],ymm9[5,6],ymm7[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0],ymm8[1,2],ymm9[3],ymm8[4,5],ymm9[6],ymm8[7]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm28 = zmm8[0,1,2,3],zmm7[0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%r9), %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm9[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm8[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm9[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm8[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm8[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[2,3,2,3,6,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm9[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm9[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm5[0],xmm13[0],xmm5[1],xmm13[1],xmm5[2],xmm13[2],xmm5[3],xmm13[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[1,0,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm13 = xmm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm13[0],xmm5[1],xmm13[1],xmm5[2],xmm13[2],xmm5[3],xmm13[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm5, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm12, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm10[0],xmm2[0],xmm10[1],xmm2[1],xmm10[2],xmm2[2],xmm10[3],xmm2[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm17[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,7,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm10[4],xmm6[4],xmm10[5],xmm6[5],xmm10[6],xmm6[6],xmm10[7],xmm6[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm0[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm5[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm0[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm10[4],xmm7[4],xmm10[5],xmm7[5],xmm10[6],xmm7[6],xmm10[7],xmm7[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm0[1,1,1,1]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm8[0],ymm2[0],ymm8[1],ymm2[1],ymm8[2],ymm2[2],ymm8[3],ymm2[3],ymm8[8],ymm2[8],ymm8[9],ymm2[9],ymm8[10],ymm2[10],ymm8[11],ymm2[11]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm0[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm0[1,2,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm14 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[1,0,2,2,5,4,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm3, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm11[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm14[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm9, %zmm11
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm3, %zmm11 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r8), %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm9[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm11, %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm14[0,1],ymm3[2],ymm14[3,4],ymm3[5],ymm14[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm14
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r8), %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm13, %xmm3, %xmm13
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0],ymm13[1],ymm11[2,3],ymm13[4],ymm11[5,6],ymm13[7]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 = zmm11[0,1,2,3],zmm14[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm11 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm13 = ymm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm0[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm11 = ymm13[0],ymm11[0],ymm13[1],ymm11[1],ymm13[2],ymm11[2],ymm13[3],ymm11[3],ymm13[8],ymm11[8],ymm13[9],ymm11[9],ymm13[10],ymm11[10],ymm13[11],ymm11[11]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm2[2,1,2,3,6,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm8[2,1,2,3,6,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm13[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm13[0],ymm1[0],ymm13[1],ymm1[1],ymm13[2],ymm1[2],ymm13[3],ymm1[3],ymm13[8],ymm1[8],ymm13[9],ymm1[9],ymm13[10],ymm1[10],ymm13[11],ymm1[11]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm8[4],ymm2[4],ymm8[5],ymm2[5],ymm8[6],ymm2[6],ymm8[7],ymm2[7],ymm8[12],ymm2[12],ymm8[13],ymm2[13],ymm8[14],ymm2[14],ymm8[15],ymm2[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,2,3,3,5,6,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[3,3,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm11, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm9, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm9[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm1[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm2[4],ymm8[4],ymm2[5],ymm8[5],ymm2[6],ymm8[6],ymm2[7],ymm8[7],ymm2[12],ymm8[12],ymm2[13],ymm8[13],ymm2[14],ymm8[14],ymm2[15],ymm8[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[1,2,3,3,5,6,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm11[2,1,2,3,6,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm8[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm2[2,1,2,3,6,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm9[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[8],ymm8[8],ymm9[9],ymm8[9],ymm9[10],ymm8[10],ymm9[11],ymm8[11]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm9 = ymm2[4],ymm11[4],ymm2[5],ymm11[5],ymm2[6],ymm11[6],ymm2[7],ymm11[7],ymm2[12],ymm11[12],ymm2[13],ymm11[13],ymm2[14],ymm11[14],ymm2[15],ymm11[15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[3,3,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm2, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm1, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0],ymm0[1],ymm8[2,3],ymm0[4],ymm8[5,6],ymm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0],ymm1[1,2],ymm8[3],ymm1[4,5],ymm8[6],ymm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm25, %zmm2, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm8 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm23, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm21, %zmm25, %zmm9
 ; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm12, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm5, %zmm6 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm6, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm1[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm2[0,2,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,4,4,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm12[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm1[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm15[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm4, %ymm24
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm1, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm10[0],xmm7[0],xmm10[1],xmm7[1],xmm10[2],xmm7[2],xmm10[3],xmm7[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm7[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm10[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r9), %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm15[0,2,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm12[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm10, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm5, %zmm7 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm7, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm3[2,1,3,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
 ; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm10[0],ymm5[1,2],ymm10[3],ymm5[4,5],ymm10[6],ymm5[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm1, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0,1],ymm1[2],ymm6[3,4],ymm1[5],ymm6[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm1[0,1,2,3],zmm5[0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r9), %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm5[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm6[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm5[2,3,2,3,6,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm10[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm15[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm12, %ymm19
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 64(%r9), %xmm13
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm15[0,2,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,4,4,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm13[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm2, %ymm16
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm1[2,3,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,2,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm3, %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm7[0,1],ymm3[2],ymm7[3,4],ymm3[5],ymm7[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r9), %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm5[4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm13[0,2,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,2,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,0,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm4, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm12 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpsrldq {{.*#+}} xmm15 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm15[0],xmm12[0],xmm15[1],xmm12[1],xmm15[2],xmm12[2],xmm15[3],xmm12[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm7[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,0,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm12, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm5, %zmm5
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm1[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,7,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm2[0,1,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,7,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r9), %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm4, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm3, %zmm4 {%k2}
-; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm4, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm1[2,1,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm8[0],ymm3[1,2],ymm8[3],ymm3[4,5],ymm8[6],ymm3[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm9[2,3,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,2,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm31 = xmm31[0],zero,xmm31[1],zero,xmm31[2],zero,xmm31[3],zero
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm31, %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm11[2],ymm4[3,4],ymm11[5],ymm4[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm27, %zmm26, %zmm11
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm11 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm4[0,1,2,3],zmm3[0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r9), %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm4[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm4[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm2[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,7,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm4[0,1,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm7[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,3,2,3,6,7,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm7[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm12, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa32 %zmm5, %zmm2 {%k2}
+; AVX512F-ONLY-SLOW-NEXT:    vextracti64x4 $1, %zmm2, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm4[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm12[0],ymm5[1,2],ymm12[3],ymm5[4,5],ymm12[6],ymm5[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm11[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm12[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm18, %zmm25, %zmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,3,2,3,6,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm4[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm18 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm9[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm2, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm13, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,2,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,4,4,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm14[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm13, %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[0,2,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,4,4,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm21, %zmm26, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm24, %zmm26, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm29, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21, %zmm8 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm17, %zmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, (%rsp), %zmm21, %zmm10 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm12, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm23, %zmm21, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm6, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm28, %zmm21, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm19, %zmm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm6 # 64-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm16, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm22, %zmm12, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm2, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm20, %zmm12, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm13, %zmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm12, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm11[2,3,2,3,6,7,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm13[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm23 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm23, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm1[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm4, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm5[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm11
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm11[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq %xmm5, %ymm5
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[0,2,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,4,4,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm30, %zmm25, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm10, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm18, %zmm25, %zmm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm15 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm15 # 64-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm20, %zmm22, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm28, %zmm18, %zmm19
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm14, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm17, %zmm18, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm12
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm21, %zmm18, %zmm12
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm27, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm26, %zmm14, %zmm13
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm24, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm29, %zmm14, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm14, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm5, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm14, %zmm3
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 192(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 128(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 320(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 256(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 448(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 384(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 576(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 512(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 704(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, 640(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    addq $264, %rsp # imm = 0x108
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, (%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, 128(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 320(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm10, 256(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 448(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 384(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm13, 576(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, 512(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm15, 704(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, 640(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm8, 64(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    addq $408, %rsp # imm = 0x198
 ; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
 ; AVX512F-ONLY-SLOW-NEXT:    retq
 ;
 ; AVX512F-ONLY-FAST-LABEL: store_i16_stride6_vf64:
 ; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $1048, %rsp # imm = 0x418
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512F-ONLY-FAST-NEXT:    subq $1064, %rsp # imm = 0x428
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm27 = <17,18,17,18,u,u,19,19,5,4,2,2,5,4,6,6>
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm1, %zmm27, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rcx), %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm9, (%rsp) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm8
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rcx), %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdx), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm11[0],ymm2[1],ymm11[1],ymm2[2],ymm11[2],ymm2[3],ymm11[3],ymm2[8],ymm11[8],ymm2[9],ymm11[9],ymm2[10],ymm11[10],ymm2[11],ymm11[11]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rcx), %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdx), %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm6, %xmm23
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rcx), %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm6[0],ymm14[0],ymm6[1],ymm14[1],ymm6[2],ymm14[2],ymm6[3],ymm14[3],ymm6[8],ymm14[8],ymm6[9],ymm14[9],ymm6[10],ymm14[10],ymm6[11],ymm14[11]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm8, %xmm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm7, %xmm21
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm0[0],ymm4[0],ymm0[1],ymm4[1],ymm0[2],ymm4[2],ymm0[3],ymm4[3],ymm0[8],ymm4[8],ymm0[9],ymm4[9],ymm0[10],ymm4[10],ymm0[11],ymm4[11]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm9[4],xmm3[4],xmm9[5],xmm3[5],xmm9[6],xmm3[6],xmm9[7],xmm3[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[1,1,1,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm12 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <1,2,1,2,u,u,3,3,13,12,10,10,13,12,14,14>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm1, %zmm22, %zmm18
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm7, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    movw $18724, %ax # imm = 0x4924
-; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [8,9,20,11,12,21,14,15]
+; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm18 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [8,9,20,11,12,21,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm1, %zmm26, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm31 = [0,9,2,3,8,5,6,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm1, %ymm31, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm1, %zmm17, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,9,2,3,8,5,6,11]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm1, %ymm8, %ymm18
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm1[0,1,0,1]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm0[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm1[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rsi), %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[1,1,1,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rsi), %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm15[0],ymm13[0],ymm15[1],ymm13[1],ymm15[2],ymm13[2],ymm15[3],ymm13[3],ymm15[8],ymm13[8],ymm15[9],ymm13[9],ymm15[10],ymm13[10],ymm15[11],ymm13[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm12, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm22, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm16 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r8), %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm3[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm12, %zmm17, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r8), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm0, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm12, %ymm8, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r9), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm12[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r9), %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm12[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm9[2,2,2,2]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rcx), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdx), %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rcx), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdx), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm9 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm0, %zmm27, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rsi), %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,1,1,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rsi), %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm8[0],ymm5[0],ymm8[1],ymm5[1],ymm8[2],ymm5[2],ymm8[3],ymm5[3],ymm8[8],ymm5[8],ymm8[9],ymm5[9],ymm8[10],ymm5[10],ymm8[11],ymm5[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm9 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r8), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm0[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm11, %zmm26, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r8), %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm12, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm12, %xmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm11, %ymm31, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm10[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r9), %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm9, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r9), %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm10[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [5,6,5,6,5,6,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm9 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm9 = ymm2[4],ymm11[4],ymm2[5],ymm11[5],ymm2[6],ymm11[6],ymm2[7],ymm11[7],ymm2[12],ymm11[12],ymm2[13],ymm11[13],ymm2[14],ymm11[14],ymm2[15],ymm11[15]
 ; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm9, %ymm24, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm11 = ymm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm11[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm2 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm5, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm8, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm11[0],ymm2[0],ymm11[1],ymm2[1],ymm11[2],ymm2[2],ymm11[3],ymm2[3],ymm11[8],ymm2[8],ymm11[9],ymm2[9],ymm11[10],ymm2[10],ymm11[11],ymm2[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm8[4],ymm5[4],ymm8[5],ymm5[5],ymm8[6],ymm5[6],ymm8[7],ymm5[7],ymm8[12],ymm5[12],ymm8[13],ymm5[13],ymm8[14],ymm5[14],ymm8[15],ymm5[15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[3,3,3,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm1, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm21 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [8,21,10,11,20,13,14,23]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm1, %zmm17, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [12,1,2,13,4,5,14,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm1, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm1, %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm10, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm11[0],ymm2[1],ymm11[1],ymm2[2],ymm11[2],ymm2[3],ymm11[3],ymm2[8],ymm11[8],ymm2[9],ymm11[9],ymm2[10],ymm11[10],ymm2[11],ymm11[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm2[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm10[0],ymm0[0],ymm10[1],ymm0[1],ymm10[2],ymm0[2],ymm10[3],ymm0[3],ymm10[8],ymm0[8],ymm10[9],ymm0[9],ymm10[10],ymm0[10],ymm10[11],ymm0[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm10 = ymm15[4],ymm13[4],ymm15[5],ymm13[5],ymm15[6],ymm13[6],ymm15[7],ymm13[7],ymm15[12],ymm13[12],ymm15[13],ymm13[13],ymm15[14],ymm13[14],ymm15[15],ymm13[15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[3,3,3,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm11, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm9, %zmm25 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [8,21,10,11,20,13,14,23]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm0, %zmm20, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm3[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [12,1,2,13,4,5,14,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm2, %ymm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm10[u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm12[u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rcx), %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm1[4],ymm8[4],ymm1[5],ymm8[5],ymm1[6],ymm8[6],ymm1[7],ymm8[7],ymm1[12],ymm8[12],ymm1[13],ymm8[13],ymm1[14],ymm8[14],ymm1[15],ymm8[15]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm0, %ymm24, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm2 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm0[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rsi), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm2, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm0, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[8],ymm10[8],ymm11[9],ymm10[9],ymm11[10],ymm10[10],ymm11[11],ymm10[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm11 = ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[3,3,3,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm9, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm5, %zmm28 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm28, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r8), %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm12, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm17, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm12[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm5, %ymm15, %ymm28
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm23 = [1,0,2,2,1,0,2,2]
-; AVX512F-ONLY-FAST-NEXT:    # ymm23 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm5, %ymm23, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm6[4],ymm14[4],ymm6[5],ymm14[5],ymm6[6],ymm14[6],ymm6[7],ymm14[7],ymm6[12],ymm14[12],ymm6[13],ymm14[13],ymm6[14],ymm14[14],ymm6[15],ymm14[15]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm0, %ymm24, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm14[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm6 = ymm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm6[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm6[0],ymm3[0],ymm6[1],ymm3[1],ymm6[2],ymm3[2],ymm6[3],ymm3[3],ymm6[8],ymm3[8],ymm6[9],ymm3[9],ymm6[10],ymm3[10],ymm6[11],ymm3[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rsi), %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm11, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm9[0],ymm6[0],ymm9[1],ymm6[1],ymm9[2],ymm6[2],ymm9[3],ymm6[3],ymm9[8],ymm6[8],ymm9[9],ymm6[9],ymm9[10],ymm6[10],ymm9[11],ymm6[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm9 = ymm15[4],ymm11[4],ymm15[5],ymm11[5],ymm15[6],ymm11[6],ymm15[7],ymm11[7],ymm15[12],ymm11[12],ymm15[13],ymm11[13],ymm15[14],ymm11[14],ymm15[15],ymm11[15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[3,3,3,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm6, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm14 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r8), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm3, %zmm20, %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm0[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm3, %ymm10, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [1,0,2,2,1,0,2,2]
+; AVX512F-ONLY-FAST-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm23, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm3, %ymm19, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm9 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm9[0],xmm6[0],xmm9[1],xmm6[1],xmm9[2],xmm6[2],xmm9[3],xmm6[3]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm6, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm7, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm1, %zmm30
+; AVX512F-ONLY-FAST-NEXT:    movw $9362, %ax # imm = 0x2492
+; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm30 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [16,9,10,17,12,13,18,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm1, %zmm23, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm31, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm19, %ymm31
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm3, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm6, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rsi), %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm7, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm5, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm6, %zmm30
-; AVX512F-ONLY-FAST-NEXT:    movw $9362, %ax # imm = 0x2492
-; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm30 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [16,9,10,17,12,13,18,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm14[2,1,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm3, %zmm29, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rcx), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rsi), %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm4, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm5, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm20
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm16
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm8[0],ymm1[1],ymm8[1],ymm1[2],ymm8[2],ymm1[3],ymm8[3],ymm1[8],ymm8[8],ymm1[9],ymm8[9],ymm1[10],ymm8[10],ymm1[11],ymm8[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm3, %zmm27, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[1,1,1,1]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm12[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm0, %zmm26, %zmm18
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm31, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm31 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm28
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r8), %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm2, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, %xmm14
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm31, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm11 = ymm15[0],ymm3[0],ymm15[1],ymm3[1],ymm15[2],ymm3[2],ymm15[3],ymm3[3],ymm15[8],ymm3[8],ymm15[9],ymm3[9],ymm15[10],ymm3[10],ymm15[11],ymm3[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm2, %zmm27, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm2[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm3, %zmm23, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm15[0],ymm11[0],ymm15[1],ymm11[1],ymm15[2],ymm11[2],ymm15[3],ymm11[3],ymm15[8],ymm11[8],ymm15[9],ymm11[9],ymm15[10],ymm11[10],ymm15[11],ymm11[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm27, %zmm22, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm27 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm0, %zmm17, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm2, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm8, %ymm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm2[1,1,1,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm13 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm12, %zmm11 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm12[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm13, %zmm11, %zmm26
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[1,1,1,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm15[0],ymm11[0],ymm15[1],ymm11[1],ymm15[2],ymm11[2],ymm15[3],ymm11[3],ymm15[8],ymm11[8],ymm15[9],ymm11[9],ymm15[10],ymm11[10],ymm15[11],ymm11[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm1[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm3 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm3 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm2[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm0, %zmm3, %zmm17
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm7, %xmm0, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm7, %ymm31, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm0, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm16, %ymm23, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm19, %ymm19
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm7, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm10, %zmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm7, %zmm27 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm14[2,1,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm7, %zmm29, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm0, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm18[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm11[0,1,2,3],zmm26[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm8, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm1[4],ymm4[4],ymm1[5],ymm4[5],ymm1[6],ymm4[6],ymm1[7],ymm4[7],ymm1[12],ymm4[12],ymm1[13],ymm4[13],ymm1[14],ymm4[14],ymm1[15],ymm4[15]
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm8 = ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm4[0],ymm8[0],ymm4[1],ymm8[1],ymm4[2],ymm8[2],ymm4[3],ymm8[3],ymm4[8],ymm8[8],ymm4[9],ymm8[9],ymm4[10],ymm8[10],ymm4[11],ymm8[11]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm15, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm13[0],ymm8[0],ymm13[1],ymm8[1],ymm13[2],ymm8[2],ymm13[3],ymm8[3],ymm13[8],ymm8[8],ymm13[9],ymm8[9],ymm13[10],ymm8[10],ymm13[11],ymm8[11]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm12[2,2,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm15[4],ymm3[4],ymm15[5],ymm3[5],ymm15[6],ymm3[6],ymm15[7],ymm3[7],ymm15[12],ymm3[12],ymm15[13],ymm3[13],ymm15[14],ymm3[14],ymm15[15],ymm3[15]
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm7 = ymm15[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm15[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm7[0],ymm3[0],ymm7[1],ymm3[1],ymm7[2],ymm3[2],ymm7[3],ymm3[3],ymm7[8],ymm3[8],ymm7[9],ymm3[9],ymm7[10],ymm3[10],ymm7[11],ymm3[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm4, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm2, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm10[0],ymm7[0],ymm10[1],ymm7[1],ymm10[2],ymm7[2],ymm10[3],ymm7[3],ymm10[8],ymm7[8],ymm10[9],ymm7[9],ymm10[10],ymm7[10],ymm10[11],ymm7[11]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm13, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm4[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm0[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm24, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm3[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm10[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[3,3,3,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm0 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm12, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm2, %zmm17, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm18 = ymm2[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm12[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [12,1,2,13,4,5,14,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm2, %ymm16, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm10[4],ymm3[4],ymm10[5],ymm3[5],ymm10[6],ymm3[6],ymm10[7],ymm3[7],ymm10[12],ymm3[12],ymm10[13],ymm3[13],ymm10[14],ymm3[14],ymm10[15],ymm3[15]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm2, %ymm24, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm10 = ymm10[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm10[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm10[0],ymm3[0],ymm10[1],ymm3[1],ymm10[2],ymm3[2],ymm10[3],ymm3[3],ymm10[8],ymm3[8],ymm10[9],ymm3[9],ymm10[10],ymm3[10],ymm10[11],ymm3[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm2, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm15, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[8],ymm10[8],ymm11[9],ymm10[9],ymm11[10],ymm10[10],ymm11[11],ymm10[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm11[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm12[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm12 = ymm15[4],ymm2[4],ymm15[5],ymm2[5],ymm15[6],ymm2[6],ymm15[7],ymm2[7],ymm15[12],ymm2[12],ymm15[13],ymm2[13],ymm15[14],ymm2[14],ymm15[15],ymm2[15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[3,3,3,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm3, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm10, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm2, %zmm24 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm3, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm2, %zmm24, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm3[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm2, %ymm16, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm11, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm11, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm2, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm16
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm2, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm2, %ymm23, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm8[0],xmm3[0],xmm8[1],xmm3[1],xmm8[2],xmm3[2],xmm8[3],xmm3[3]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm3, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm6, %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm5, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm11 = ymm15[4],ymm11[4],ymm15[5],ymm11[5],ymm15[6],ymm11[6],ymm15[7],ymm11[7],ymm15[12],ymm11[12],ymm15[13],ymm11[13],ymm15[14],ymm11[14],ymm15[15],ymm11[15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm13[u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm12[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm0, %ymm24, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[3,3,3,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm8, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm1 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm2, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm8, %zmm20, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm8[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm13[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm8[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [12,1,2,13,4,5,14,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm2, %ymm6, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm0[4],ymm8[4],ymm0[5],ymm8[5],ymm0[6],ymm8[6],ymm0[7],ymm8[7],ymm0[12],ymm8[12],ymm0[13],ymm8[13],ymm0[14],ymm8[14],ymm0[15],ymm8[15]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm2, %ymm24, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm11, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm15[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm15 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm12 = ymm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm0[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm12 = ymm12[0],ymm15[0],ymm12[1],ymm15[1],ymm12[2],ymm15[2],ymm12[3],ymm15[3],ymm12[8],ymm15[8],ymm12[9],ymm15[9],ymm12[10],ymm15[10],ymm12[11],ymm15[11]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm0, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm8, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm13 = ymm13[0],ymm15[0],ymm13[1],ymm15[1],ymm13[2],ymm15[2],ymm13[3],ymm15[3],ymm13[8],ymm15[8],ymm13[9],ymm15[9],ymm13[10],ymm15[10],ymm13[11],ymm15[11]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm15 = ymm8[4],ymm0[4],ymm8[5],ymm0[5],ymm8[6],ymm0[6],ymm8[7],ymm0[7],ymm8[12],ymm0[12],ymm8[13],ymm0[13],ymm8[14],ymm0[14],ymm8[15],ymm0[15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm0[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[3,3,3,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm12, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm13, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm12, %zmm2 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm8, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm12, %zmm2, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm8[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm12, %ymm6, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm8, %ymm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsp), %xmm10 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm0, %ymm19, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm12 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm13 = xmm10[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm12, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm7, %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm13 = xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm2 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm4[2,1,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm29, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm5, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm5 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsp), %xmm8 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm5, %ymm23, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm8 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm23 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm23 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm26, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm19 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm18, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm18 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21, %zmm9 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm9 = zmm21[0,1,2,3],mem[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm20 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm9, %zmm21, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm28, %zmm9 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm9 = zmm28[0,1,2,3],mem[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm22 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm9, %zmm21, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm10[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm11[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm1, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm21, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm6, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm6[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm7, %zmm23, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm10, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm9[4],xmm6[4],xmm9[5],xmm6[5],xmm9[6],xmm6[6],xmm9[7],xmm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm15[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm9, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm15, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm25 # 16-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # xmm25 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm7, %ymm19, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm12 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm13 = xmm10[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm13 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm13 = zmm18[0,1,2,3],mem[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm15 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm18, %zmm15
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm13 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm13 = zmm16[0,1,2,3],mem[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm16 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm18, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm27[0,1,2,3],zmm21[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm19 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm18, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm17[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm26, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm18, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm3 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm3 = zmm25[0,1,2,3],mem[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm13 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm14[0,1,2,3],zmm29[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm14 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm4[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm24[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm22[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm17, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm18 # 16-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # xmm18 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm12, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm7, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm8, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm9[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm12, %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm9, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm29 = ymm9[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm22 # 16-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # xmm22 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm8[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm7, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm8, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm24 = mem[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq $230, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm25 = mem[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm26 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm1 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm9[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm4, %zmm1, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm6[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm6, %xmm10
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm10, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm13[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm10, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm13, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm16[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm13 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm5, %zmm6 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm14[2,1,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm5, %zmm6, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm4[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm5, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm4, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm28 = xmm14[0],zero,xmm14[1],zero,xmm14[2],zero,xmm14[3],zero
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm14, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm14, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm24[0,1,2,3],zmm17[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm26, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm17, %zmm21, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [0,1,8,3,4,9,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm17, %ymm30
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm25, %ymm17, %ymm27
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm13, %ymm17, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm28, %ymm17, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm0 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm0 = zmm30[0,1,2,3],mem[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm9, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm27[0,1,2,3],zmm31[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm10, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm3[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm6[0,1,2,3],zmm29[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm14, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm27 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm9, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm9, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm20[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm24, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,8,3,4,9,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm18, %ymm2, %ymm30
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm22, %ymm2, %ymm31
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm26, %ymm2, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm27, %ymm2, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm2 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm2 = zmm30[0,1,2,3],mem[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm29, %zmm21, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm18, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm31[0,1,2,3],zmm28[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm18, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm5[0,1,2,3],zmm0[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm4, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm18, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm23[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm9, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm18, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, (%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 192(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 128(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 320(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, 256(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 128(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 320(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 256(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 448(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm7, 384(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm9, 576(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, 512(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 704(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, 640(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm8, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $1048, %rsp # imm = 0x418
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 384(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 576(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 512(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 704(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 640(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, 64(%rax)
+; AVX512F-ONLY-FAST-NEXT:    addq $1064, %rsp # imm = 0x428
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
 ; AVX512DQ-SLOW-LABEL: store_i16_stride6_vf64:
 ; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    subq $264, %rsp # imm = 0x108
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rcx), %xmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %xmm0
+; AVX512DQ-SLOW-NEXT:    subq $408, %rsp # imm = 0x198
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rcx), %xmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,2,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa (%rcx), %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %ymm3
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm16
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm18
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[1,0,2,2,5,4,6,6]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rsi), %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rsi), %xmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rsi), %ymm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm12
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm12[0],ymm11[0],ymm12[1],ymm11[1],ymm12[2],ymm11[2],ymm12[3],ymm11[3],ymm12[8],ymm11[8],ymm12[9],ymm11[9],ymm12[10],ymm11[10],ymm12[11],ymm11[11]
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rsi), %ymm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm31
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
 ; AVX512DQ-SLOW-NEXT:    movw $18724, %ax # imm = 0x4924
 ; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k1
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm2, %zmm1, %zmm0 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %ymm3
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm3[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm19
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %xmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm4, %xmm3, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm4, %xmm9
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm31
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %xmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm3, %xmm12
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rcx), %xmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdx), %xmm14
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm14[4],xmm6[4],xmm14[5],xmm6[5],xmm14[6],xmm6[6],xmm14[7],xmm6[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 (%r9), %xmm19
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm19[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm0[2,2,2,2]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rcx), %xmm15
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdx), %xmm9
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,2,3,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rcx), %ymm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdx), %ymm3
@@ -8450,890 +8528,891 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm0[1,0,2,2,5,4,6,6]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rsi), %xmm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm2
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm10 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm10[1,1,1,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rsi), %ymm10
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm13
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm15 = ymm13[0],ymm10[0],ymm13[1],ymm10[1],ymm13[2],ymm10[2],ymm13[3],ymm10[3],ymm13[8],ymm10[8],ymm13[9],ymm10[9],ymm13[10],ymm10[10],ymm13[11],ymm10[11]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[1,1,1,1]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rsi), %ymm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm8
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm8[0],ymm7[0],ymm8[1],ymm7[1],ymm8[2],ymm7[2],ymm8[3],ymm7[3],ymm8[8],ymm7[8],ymm8[9],ymm7[9],ymm8[10],ymm7[10],ymm8[11],ymm7[11]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,1,2,3]
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm5
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm15, %zmm17, %zmm5 {%k1}
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm10, %zmm6, %zmm5 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%r8), %ymm4
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm4[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm5, %ymm7
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm15[2],ymm7[3,4],ymm15[5],ymm7[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%r8), %xmm15
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm9, %xmm15, %xmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm17
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0],ymm8[1],ymm5[2,3],ymm8[4],ymm5[5,6],ymm8[7]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm7[0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm10[2,1,2,3,6,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm13[2,1,2,3,6,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm7[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm7[0],ymm5[0],ymm7[1],ymm5[1],ymm7[2],ymm5[2],ymm7[3],ymm5[3],ymm7[8],ymm5[8],ymm7[9],ymm5[9],ymm7[10],ymm5[10],ymm7[11],ymm5[11]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm7 = ymm13[4],ymm10[4],ymm13[5],ymm10[5],ymm13[6],ymm10[6],ymm13[7],ymm10[7],ymm13[12],ymm10[12],ymm13[13],ymm10[13],ymm13[14],ymm10[14],ymm13[15],ymm10[15]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm4[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2]
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm5, %ymm10
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0,1],ymm6[2],ymm10[3,4],ymm6[5],ymm10[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm10
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%r8), %xmm6
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm12, %xmm6, %xmm11
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm12, %xmm18
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0],ymm11[1],ymm5[2,3],ymm11[4],ymm5[5,6],ymm11[7]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm5[0,1,2,3],zmm10[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%r9), %xmm10
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm10[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm10, %xmm13
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,2,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm5[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%r9), %ymm5
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm5[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm10[2,2,2,2]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm7[2,1,2,3,6,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm10[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm8[2,1,2,3,6,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm11[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[8],ymm10[8],ymm11[9],ymm10[9],ymm11[10],ymm10[10],ymm11[11],ymm10[11]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm7 = ymm8[4],ymm7[4],ymm8[5],ymm7[5],ymm8[6],ymm7[6],ymm8[7],ymm7[7],ymm8[12],ymm7[12],ymm8[13],ymm7[13],ymm8[14],ymm7[14],ymm8[15],ymm7[15]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[3,3,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm8 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm10 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm10[0],ymm8[0],ymm10[1],ymm8[1],ymm10[2],ymm8[2],ymm10[3],ymm8[3],ymm10[8],ymm8[8],ymm10[9],ymm8[9],ymm10[10],ymm8[10],ymm10[11],ymm8[11]
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm11 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm11[0],ymm8[0],ymm11[1],ymm8[1],ymm11[2],ymm8[2],ymm11[3],ymm8[3],ymm11[8],ymm8[8],ymm11[9],ymm8[9],ymm11[10],ymm8[10],ymm11[11],ymm8[11]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,2]
 ; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm3[4],ymm1[4],ymm3[5],ymm1[5],ymm3[6],ymm1[6],ymm3[7],ymm1[7],ymm3[12],ymm1[12],ymm3[13],ymm1[13],ymm3[14],ymm1[14],ymm3[15],ymm1[15]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[1,2,3,3,5,6,7,7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm3
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm10, %zmm3
 ; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm1, %zmm8, %zmm3 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm5, %ymm4, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm5, %ymm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm7, %ymm4, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm7, %ymm14
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm3, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0],ymm1[1],ymm5[2,3],ymm1[4],ymm5[5,6],ymm1[7]
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm3, %ymm7
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm7[0],ymm1[1],ymm7[2,3],ymm1[4],ymm7[5,6],ymm1[7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm4[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
 ; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm3[0,1,2,3],zmm1[0,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm3[0,1,2,3],zmm1[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm5[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rsi), %ymm10
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm10[2,1,2,3,6,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm5[2,3,2,3,6,7,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rsi), %ymm12
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm12[2,1,2,3,6,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm13
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm13[2,1,2,3,6,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm8
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm8[2,1,2,3,6,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm3[0],ymm1[0],ymm3[1],ymm1[1],ymm3[2],ymm1[2],ymm3[3],ymm1[3],ymm3[8],ymm1[8],ymm3[9],ymm1[9],ymm3[10],ymm1[10],ymm3[11],ymm1[11]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm13[4],ymm10[4],ymm13[5],ymm10[5],ymm13[6],ymm10[6],ymm13[7],ymm10[7],ymm13[12],ymm10[12],ymm13[13],ymm10[13],ymm13[14],ymm10[14],ymm13[15],ymm10[15]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm8[4],ymm12[4],ymm8[5],ymm12[5],ymm8[6],ymm12[6],ymm8[7],ymm12[7],ymm8[12],ymm12[12],ymm8[13],ymm12[13],ymm8[14],ymm12[14],ymm8[15],ymm12[15]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm1[3,3,3,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm4
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm7 = ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm1
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm8 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm8[0],ymm7[0],ymm8[1],ymm7[1],ymm8[2],ymm7[2],ymm8[3],ymm7[3],ymm8[8],ymm7[8],ymm8[9],ymm7[9],ymm8[10],ymm7[10],ymm8[11],ymm7[11]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm1[4],ymm4[4],ymm1[5],ymm4[5],ymm1[6],ymm4[6],ymm1[7],ymm4[7],ymm1[12],ymm4[12],ymm1[13],ymm4[13],ymm1[14],ymm4[14],ymm1[15],ymm4[15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[1,2,3,3,5,6,7,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm7
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm10 = ymm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm7[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm4
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm11 = ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[8],ymm10[8],ymm11[9],ymm10[9],ymm11[10],ymm10[10],ymm11[11],ymm10[11]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,2]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm11 = ymm4[4],ymm7[4],ymm4[5],ymm7[5],ymm4[6],ymm7[6],ymm4[7],ymm7[7],ymm4[12],ymm7[12],ymm4[13],ymm7[13],ymm4[14],ymm7[14],ymm4[15],ymm7[15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm11[1,2,3,3,5,6,7,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
 ; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm5
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm8, %zmm7, %zmm5 {%k1}
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm11, %zmm10, %zmm5 {%k1}
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r8), %ymm3
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm9, %ymm3, %ymm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm9, %ymm20
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm5, %ymm8
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2,3],ymm7[4],ymm8[5,6],ymm7[7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm3[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm8[0],ymm5[1,2],ymm8[3],ymm5[4,5],ymm8[6],ymm5[7]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm7[0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm5, (%rsp) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm3, %ymm10
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm16
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm5, %ymm11
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm10 = ymm11[0],ymm10[1],ymm11[2,3],ymm10[4],ymm11[5,6],ymm10[7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm10
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm3[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm11[0],ymm5[1,2],ymm11[3],ymm5[4,5],ymm11[6],ymm5[7]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm28 = zmm5[0,1,2,3],zmm10[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r9), %ymm5
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm5[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm10[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm5[2,3,2,3,6,7,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm10[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm10[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,5]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[0,1,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,6,5]
 ; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm14[0],xmm6[0],xmm14[1],xmm6[1],xmm14[2],xmm6[2],xmm14[3],xmm6[3]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,0,2,2]
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3]
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm6, %ymm6
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm11 = xmm15[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm9 = xmm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3]
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm9, %ymm9
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
 ; AVX512DQ-SLOW-NEXT:    movw $9362, %ax # imm = 0x2492
 ; AVX512DQ-SLOW-NEXT:    kmovw %eax, %k2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm5, %zmm0
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm6, %zmm2, %zmm0 {%k2}
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm15[2,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm10, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm9, %zmm2, %zmm0 {%k2}
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[2,1,3,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm5[1,2],ymm2[3],ymm5[4,5],ymm2[6],ymm5[7]
-; AVX512DQ-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm5 = xmm15[0],zero,xmm15[1],zero,xmm15[2],zero,xmm15[3],zero
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm5, %ymm5
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rsi), %xmm0
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm9
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm9[1,2],ymm2[3],ymm9[4,5],ymm2[6],ymm9[7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm6, %ymm6
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm6[2],ymm0[3,4],ymm6[5],ymm0[6,7]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm26 = zmm0[0,1,2,3],zmm2[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm13[0,0,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm13, %xmm17
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm0, %ymm27
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rsi), %xmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm2
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,5]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rcx), %xmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm14
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm14[0],xmm6[0],xmm14[1],xmm6[1],xmm14[2],xmm6[2],xmm14[3],xmm6[3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[1,0,2,2]
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm15 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm9 = xmm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3]
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm9, %ymm9
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm7
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm9, %zmm8, %zmm7 {%k2}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r8), %xmm5
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm5[2,1,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0],ymm9[1,2],ymm8[3],ymm9[4,5],ymm8[6],ymm9[7]
-; AVX512DQ-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm9 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm9, %ymm9
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm9[2],ymm7[3,4],ymm9[5],ymm7[6,7]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm7[0,1,2,3],zmm8[0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm14[4],xmm6[4],xmm14[5],xmm6[5],xmm14[6],xmm6[6],xmm14[7],xmm6[7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,2,3,3]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[1],ymm4[1],ymm1[2],ymm4[2],ymm1[3],ymm4[3],ymm1[8],ymm4[8],ymm1[9],ymm4[9],ymm1[10],ymm4[10],ymm1[11],ymm4[11]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[1,0,2,2,5,4,6,6]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,1,1,1]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm6[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm13[0],ymm10[0],ymm13[1],ymm10[1],ymm13[2],ymm10[2],ymm13[3],ymm10[3],ymm13[8],ymm10[8],ymm13[9],ymm10[9],ymm13[10],ymm10[10],ymm13[11],ymm10[11]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm4[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm2, %zmm0, %zmm1 {%k1}
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm3[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm15
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm15, %xmm5, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm1[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,7,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm2[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,7,6,5]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm9[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rcx), %xmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm10
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[1,0,2,2]
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm14 = xmm10[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm0, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm15[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm6, %zmm11
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm14, %zmm11 {%k2}
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r8), %xmm6
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[2,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm11, %ymm14
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm14[1,2],ymm0[3],ymm14[4,5],ymm0[6],ymm14[7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm14 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm14, %ymm14
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm11[0,1],ymm14[2],ymm11[3,4],ymm14[5],ymm11[6,7]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm29 = zmm11[0,1,2,3],zmm0[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,2,3,3]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm4[0],ymm7[0],ymm4[1],ymm7[1],ymm4[2],ymm7[2],ymm4[3],ymm7[3],ymm4[8],ymm7[8],ymm4[9],ymm7[9],ymm4[10],ymm7[10],ymm4[11],ymm7[11]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[1,0,2,2,5,4,6,6]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm8[0],ymm12[0],ymm8[1],ymm12[1],ymm8[2],ymm12[2],ymm8[3],ymm12[3],ymm8[8],ymm12[8],ymm8[9],ymm12[9],ymm8[10],ymm12[10],ymm8[11],ymm12[11]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm2, %zmm1, %zmm0 {%k1}
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm3[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm13
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm13, %xmm6, %xmm2
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm30 = zmm0[0,1,2,3],zmm1[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r9), %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm0[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm5[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm0[2,2,2,2]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm7
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm10
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm1[0,1,2,3],zmm0[0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm10[4],xmm3[4],xmm10[5],xmm3[5],xmm10[6],xmm3[6],xmm10[7],xmm3[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm17
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[1,2,3,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm2
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[1,0,2,2,5,4,6,6]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm14
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm5
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm4[1,1,1,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm13
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm13[0],ymm4[0],ymm13[1],ymm4[1],ymm13[2],ymm4[2],ymm13[3],ymm4[3],ymm13[8],ymm4[8],ymm13[9],ymm4[9],ymm13[10],ymm4[10],ymm13[11],ymm4[11]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm9
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm8, %zmm7, %zmm9 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r8), %ymm3
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm3[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm10[4],xmm7[4],xmm10[5],xmm7[5],xmm10[6],xmm7[6],xmm10[7],xmm7[7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[1,2,3,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm9
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[8],ymm8[8],ymm9[9],ymm8[9],ymm9[10],ymm8[10],ymm9[11],ymm8[11]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm0[1,0,2,2,5,4,6,6]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm5
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm4
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm0[1,1,1,1]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm0
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm15 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm3, %zmm12
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm15, %zmm14, %zmm12 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r8), %ymm15
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm15[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm12, %ymm14
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm14[0,1],ymm3[2],ymm14[3,4],ymm3[5],ymm14[6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm14
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r8), %xmm3
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm13, %xmm3, %xmm13
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm12[0],ymm13[1],ymm12[2,3],ymm13[4],ymm12[5,6],ymm13[7]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 = zmm12[0,1,2,3],zmm14[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm1[2,1,2,3,6,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm12[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm0[2,1,2,3,6,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm13[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm12 = ymm13[0],ymm12[0],ymm13[1],ymm12[1],ymm13[2],ymm12[2],ymm13[3],ymm12[3],ymm13[8],ymm12[8],ymm13[9],ymm12[9],ymm13[10],ymm12[10],ymm13[11],ymm12[11]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm13 = ymm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm9[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm13[0],ymm1[0],ymm13[1],ymm1[1],ymm13[2],ymm1[2],ymm13[3],ymm1[3],ymm13[8],ymm1[8],ymm13[9],ymm1[9],ymm13[10],ymm1[10],ymm13[11],ymm1[11]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm9[4],ymm8[4],ymm9[5],ymm8[5],ymm9[6],ymm8[6],ymm9[7],ymm8[7],ymm9[12],ymm8[12],ymm9[13],ymm8[13],ymm9[14],ymm8[14],ymm9[15],ymm8[15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,3,3,3]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm9, %ymm7
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm1[2],ymm7[3,4],ymm1[5],ymm7[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r8), %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm15, %xmm1, %xmm8
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6],ymm8[7]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm8[0,1,2,3],zmm7[0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm4[2,1,2,3,6,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm7[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm13[2,1,2,3,6,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm8[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm8[0],ymm7[0],ymm8[1],ymm7[1],ymm8[2],ymm7[2],ymm8[3],ymm7[3],ymm8[8],ymm7[8],ymm8[9],ymm7[9],ymm8[10],ymm7[10],ymm8[11],ymm7[11]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm13[4],ymm4[4],ymm13[5],ymm4[5],ymm13[6],ymm4[6],ymm13[7],ymm4[7],ymm13[12],ymm4[12],ymm13[13],ymm4[13],ymm13[14],ymm4[14],ymm13[15],ymm4[15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[3,3,3,3]
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm8 = ymm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm0[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm9 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[1,2,3,3,5,6,7,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm12, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm8, %zmm1, %zmm0 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm14
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm15, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm8
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm8[0],ymm1[1],ymm8[2,3],ymm1[4],ymm8[5,6],ymm1[7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm15[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm8[0],ymm0[1,2],ymm8[3],ymm0[4,5],ymm8[6],ymm0[7]
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm0[0,1,2,3],zmm1[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm9
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm31[2,1,2,3,6,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm8[2,1,2,3,6,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm8[4],ymm9[4],ymm8[5],ymm9[5],ymm8[6],ymm9[6],ymm8[7],ymm9[7],ymm8[12],ymm9[12],ymm8[13],ymm9[13],ymm8[14],ymm9[14],ymm8[15],ymm9[15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[3,3,3,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm8 = ymm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm13[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm9 = ymm12[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm12[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
 ; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[8],ymm8[8],ymm9[9],ymm8[9],ymm9[10],ymm8[10],ymm9[11],ymm8[11]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm9 = ymm12[4],ymm13[4],ymm12[5],ymm13[5],ymm12[6],ymm13[6],ymm12[7],ymm13[7],ymm12[12],ymm13[12],ymm12[13],ymm13[13],ymm12[14],ymm13[14],ymm12[15],ymm13[15]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,2,3,3,5,6,7,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm7, %zmm2
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm8, %zmm2 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm6, %ymm3, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm2, %ymm4
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm4[0],ymm0[1],ymm4[2,3],ymm0[4],ymm4[5,6],ymm0[7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm2[0,1,2,3],zmm0[0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %xmm13
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm13[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm26 = ymm0[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm0[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm2[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%r9), %xmm15
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm15[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,2,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm2[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm11[2,1,2,3,6,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm7[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm12[2,1,2,3,6,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm8[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm8[0],ymm7[0],ymm8[1],ymm7[1],ymm8[2],ymm7[2],ymm8[3],ymm7[3],ymm8[8],ymm7[8],ymm8[9],ymm7[9],ymm8[10],ymm7[10],ymm8[11],ymm7[11]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm12[4],ymm11[4],ymm12[5],ymm11[5],ymm12[6],ymm11[6],ymm12[7],ymm11[7],ymm12[12],ymm11[12],ymm12[13],ymm11[13],ymm12[14],ymm11[14],ymm12[15],ymm11[15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[3,3,3,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm3
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm9 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} ymm11 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} ymm9 = ymm11[0],ymm9[0],ymm11[1],ymm9[1],ymm11[2],ymm9[2],ymm11[3],ymm9[3],ymm11[8],ymm9[8],ymm11[9],ymm9[9],ymm11[10],ymm9[10],ymm11[11],ymm9[11]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} ymm11 = ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm11[1,2,3,3,5,6,7,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm11[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm8, %zmm9, %zmm7 {%k1}
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm6, %ymm2, %ymm8
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0],ymm8[1],ymm9[2,3],ymm8[4],ymm9[5,6],ymm8[7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[1,2,3,3,5,6,7,7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm9[0],ymm7[1,2],ymm9[3],ymm7[4,5],ymm9[6],ymm7[7]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm28 = zmm7[0,1,2,3],zmm8[0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%r9), %ymm9
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm9[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm8[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm9[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm8[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm8[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[2,3,2,3,6,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm9[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm9[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,7,6,5]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm9, %zmm8, %zmm0 {%k1}
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm9, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm8
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm13 = ymm8[0],ymm1[1],ymm8[2,3],ymm1[4],ymm8[5,6],ymm1[7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm9[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm25, %zmm1, %zmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm8 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm23, %zmm9
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm21, %zmm1, %zmm9
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm11, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm11
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[0,2,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,4,4,4]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm11[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm11
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm0[0,1,2,3],zmm11[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,1,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,5]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,5]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm2[0,0,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm4, %ymm24
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm2[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm10[0],xmm2[0],xmm10[1],xmm2[1],xmm10[2],xmm2[2],xmm10[3],xmm2[3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[1,0,2,2]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm12, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm14[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm10 = xmm10[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm10[0],xmm6[0],xmm10[1],xmm6[1],xmm10[2],xmm6[2],xmm10[3],xmm6[3]
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm6, %ymm6
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm6, %zmm12, %zmm5 {%k2}
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm5, %ymm6
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm1[2,1,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm10[0],xmm7[0],xmm10[1],xmm7[1],xmm10[2],xmm7[2],xmm10[3],xmm7[3]
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm10[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r9), %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm13[1,0,2,2]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm7, %ymm7
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm7, %zmm10, %zmm5 {%k2}
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm5, %ymm7
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm3[2,1,3,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0],ymm6[1,2],ymm10[3],ymm6[4,5],ymm10[6],ymm6[7]
-; AVX512DQ-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm1, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm5[0,1],ymm1[2],ymm5[3,4],ymm1[5],ymm5[6,7]
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm1[0,1,2,3],zmm6[0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r9), %ymm5
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm5[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm6[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm5[2,3,2,3,6,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm10[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm15[0,0,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm12, %ymm19
-; AVX512DQ-SLOW-NEXT:    vmovdqa 64(%r9), %xmm14
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm15[0,2,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,4,4,4]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm14[0,0,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm2, %ymm16
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm7 = ymm10[0],ymm7[1,2],ymm10[3],ymm7[4,5],ymm10[6],ymm7[7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm0[2,3,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[0,2,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512DQ-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm3, %ymm3
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm5 = ymm5[0,1],ymm3[2],ymm5[3,4],ymm3[5],ymm5[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r9), %ymm3
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm7[4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm2[0,1,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,5]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm14[0,2,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,4]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm2[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,7,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm4[0,1,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,7,6,5]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm13 = xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm3[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,2]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[0,2,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm7, %zmm7
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm9 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r9), %xmm9
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,0,2,2]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm8, %ymm8
-; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm8, %zmm4, %zmm3 {%k2}
-; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm3, %ymm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm1[2,1,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm8[0],ymm4[1,2],ymm8[3],ymm4[4,5],ymm8[6],ymm4[7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm9[2,3,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,2,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm31 = xmm31[0],zero,xmm31[1],zero,xmm31[2],zero,xmm31[3],zero
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm31, %ymm11
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm11[2],ymm3[3,4],ymm11[5],ymm3[6,7]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm27, %zmm26, %zmm11
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm11 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm4[0,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r9), %ymm4
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm4[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm4[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm12[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm18, %zmm25, %zmm18
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,3,2,3,6,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm4[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm18 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm9[0,0,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm2, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm14, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,2,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,4,4,4]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm13[0,0,2,1,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm14, %ymm14
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,2,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,4,4,4]
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm12 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpsrldq {{.*#+}} xmm15 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm15[0],xmm12[0],xmm15[1],xmm12[1],xmm15[2],xmm12[2],xmm15[3],xmm12[3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm3[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm15 = ymm15[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,3,6,7,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[1,0,2,2]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,1]
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm12, %ymm12
+; AVX512DQ-SLOW-NEXT:    vinserti32x8 $1, %ymm12, %zmm13, %zmm7 {%k2}
+; AVX512DQ-SLOW-NEXT:    vextracti64x4 $1, %zmm7, %ymm12
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm2[2,1,3,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm22, %zmm26, %zmm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm24, %zmm26, %zmm1
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm29, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm8 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm17, %zmm10
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, (%rsp), %zmm22, %zmm10 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm12, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm21, %zmm22, %zmm4
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm6, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm28, %zmm22, %zmm0
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm19, %zmm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm6 # 64-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm16, %zmm7
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm23, %zmm12, %zmm7
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm2, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm20, %zmm12, %zmm2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm14, %zmm9
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm12, %zmm9
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm12 = ymm13[0],ymm12[1,2],ymm13[3],ymm12[4,5],ymm13[6],ymm12[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm4[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,4,4,4,4,8,9,10,11,12,12,12,12]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm4[2,3,2,3,6,7,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm11[0,2,2,1,4,5,6,7,8,10,10,9,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpmovzxwd {{.*#+}} xmm23 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm23, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm7[0,1],ymm4[2],ymm7[3,4],ymm4[5],ymm7[6,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm0[0,0,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm7, %ymm7
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm12
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm12[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm2[0,0,2,1,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq %xmm12, %ymm12
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm30, %zmm1, %zmm6
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm10, %zmm10
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm18, %zmm1, %zmm10
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14, %zmm1 # 64-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm20, %zmm22, %zmm18
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm28, %zmm14, %zmm18
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm15, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm16, %zmm14, %zmm3
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm13, %zmm11
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm21, %zmm14, %zmm11
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm27, %zmm13
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm26, %zmm14, %zmm13
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm25, %zmm24, %zmm15
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm29, %zmm14, %zmm15
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm7, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm14, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm12, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm14, %zmm2
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, 192(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, 128(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, 320(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, 256(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, 448(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, 384(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, 576(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, 512(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, 704(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, 640(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    addq $264, %rsp # imm = 0x108
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, 192(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm11, 128(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, 320(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm10, 256(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, 448(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm15, 384(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm13, 576(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm18, 512(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, 704(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, 640(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm8, 64(%rax)
+; AVX512DQ-SLOW-NEXT:    addq $408, %rsp # imm = 0x198
 ; AVX512DQ-SLOW-NEXT:    vzeroupper
 ; AVX512DQ-SLOW-NEXT:    retq
 ;
 ; AVX512DQ-FAST-LABEL: store_i16_stride6_vf64:
 ; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $1224, %rsp # imm = 0x4C8
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rsi), %ymm3
+; AVX512DQ-FAST-NEXT:    subq $920, %rsp # imm = 0x398
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rcx), %ymm8
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdx), %ymm6
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rcx), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rcx), %xmm7
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdx), %xmm14
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %xmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rcx), %xmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %xmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm12, (%rsp) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %xmm15
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm2
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[3,3,3,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm30
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rcx), %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm6[0],ymm8[0],ymm6[1],ymm8[1],ymm6[2],ymm8[2],ymm6[3],ymm8[3],ymm6[8],ymm8[8],ymm6[9],ymm8[9],ymm6[10],ymm8[10],ymm6[11],ymm8[11]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm14[4],xmm7[4],xmm14[5],xmm7[5],xmm14[6],xmm7[6],xmm14[7],xmm7[7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm15[4],xmm11[4],xmm15[5],xmm11[5],xmm15[6],xmm11[6],xmm15[7],xmm11[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm15, %xmm28
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %ymm13
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %ymm2
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm2[0],ymm13[0],ymm2[1],ymm13[1],ymm2[2],ymm13[2],ymm2[3],ymm13[3],ymm2[8],ymm13[8],ymm2[9],ymm13[9],ymm2[10],ymm13[10],ymm2[11],ymm13[11]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm24
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm10, %xmm18
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm9, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rsi), %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdx), %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm2, %ymm9
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [5,6,5,6,5,6,7,7]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm1, %ymm28, %ymm1
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm2 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm3, %ymm10
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm9 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm10 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[3,3,3,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [5,6,5,6,5,6,7,7]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm12 = ymm6[4],ymm8[4],ymm6[5],ymm8[5],ymm6[6],ymm8[6],ymm6[7],ymm8[7],ymm6[12],ymm8[12],ymm6[13],ymm8[13],ymm6[14],ymm8[14],ymm6[15],ymm8[15]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm12, %ymm17, %ymm12
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm8 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm6 = ymm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm6[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm6[0],ymm8[0],ymm6[1],ymm8[1],ymm6[2],ymm8[2],ymm6[3],ymm8[3],ymm6[8],ymm8[8],ymm6[9],ymm8[9],ymm6[10],ymm8[10],ymm6[11],ymm8[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm21
 ; AVX512DQ-FAST-NEXT:    movw $18724, %ax # imm = 0x4924
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm2, %zmm30 {%k1}
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm12, %zmm6, %zmm21 {%k1}
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [8,21,10,11,20,13,14,23]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm23
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r8), %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm2, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm1, %zmm20, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [12,1,2,13,4,5,14,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm29, %ymm30
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm2, %ymm6
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm6, %zmm20, %zmm23
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [12,1,2,13,4,5,14,7]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm6, %ymm19, %ymm21
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r9), %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm2, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm3, %ymm5
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm2, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm3, %ymm6
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rsi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm2
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[3,3,3,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rcx), %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm1, %ymm28, %ymm1
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm2 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm2, %zmm21 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r8), %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm2, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm1, %zmm20, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm6
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm6[2,2,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm29, %ymm21
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r9), %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm2, %ymm6
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm6[2,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm5, %ymm7
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm2, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm6, %ymm16
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rsi), %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm5, %xmm2
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm31 = [1,0,2,2,1,0,2,2]
-; AVX512DQ-FAST-NEXT:    # ymm31 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rcx), %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdx), %xmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm6, (%rsp) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm3, %ymm31, %ymm3
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm5, %ymm5
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm22
-; AVX512DQ-FAST-NEXT:    movw $9362, %ax # imm = 0x2492
-; AVX512DQ-FAST-NEXT:    kmovw %eax, %k2
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm5, %zmm3, %zmm22 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [16,9,10,17,12,13,18,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r8), %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[2,1,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm1, %zmm18, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rsi), %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm5, %xmm2
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rcx), %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %xmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm3, %ymm31, %ymm3
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm5, %ymm5
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm23
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm5, %zmm3, %zmm23 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r8), %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[2,1,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm1, %zmm18, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm2
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm5[4],ymm3[4],ymm5[5],ymm3[5],ymm5[6],ymm3[6],ymm5[7],ymm3[7],ymm5[12],ymm3[12],ymm5[13],ymm3[13],ymm5[14],ymm3[14],ymm5[15],ymm3[15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[3,3,3,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %ymm6
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm6[4],ymm5[4],ymm6[5],ymm5[5],ymm6[6],ymm5[6],ymm6[7],ymm5[7],ymm6[12],ymm5[12],ymm6[13],ymm5[13],ymm6[14],ymm5[14],ymm6[15],ymm5[15]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm3, %ymm28, %ymm3
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm5 = ymm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm5[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm6 = ymm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm6[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[1],ymm5[1],ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[8],ymm5[8],ymm6[9],ymm5[9],ymm6[10],ymm5[10],ymm6[11],ymm5[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm25
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm3, %zmm5, %zmm25 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rsi), %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm2, %ymm1
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm1, %zmm20, %zmm26
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm29, %ymm25
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %ymm10
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm10, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm19
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %ymm13
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm13, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm9
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm0
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm9[4],ymm13[4],ymm9[5],ymm13[5],ymm9[6],ymm13[6],ymm9[7],ymm13[7],ymm9[12],ymm13[12],ymm9[13],ymm13[13],ymm9[14],ymm13[14],ymm9[15],ymm13[15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[3,3,3,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %ymm11
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm11[4],ymm3[4],ymm11[5],ymm3[5],ymm11[6],ymm3[6],ymm11[7],ymm3[7],ymm11[12],ymm3[12],ymm11[13],ymm3[13],ymm11[14],ymm3[14],ymm11[15],ymm3[15]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm6, %ymm28, %ymm6
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm7 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm8 = ymm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm11[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm8[0],ymm7[0],ymm8[1],ymm7[1],ymm8[2],ymm7[2],ymm8[3],ymm7[3],ymm8[8],ymm7[8],ymm8[9],ymm7[9],ymm8[10],ymm7[10],ymm8[11],ymm7[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm28
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm6, %zmm7, %zmm28 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm8
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm8, %ymm2
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm2, %zmm28, %zmm20
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm8[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm2, %ymm29, %ymm28
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %xmm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm2, %ymm6
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm3, %ymm8
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm8[0],ymm6[0],ymm8[1],ymm6[1],ymm8[2],ymm6[2],ymm8[3],ymm6[3],ymm8[8],ymm6[8],ymm8[9],ymm6[9],ymm8[10],ymm6[10],ymm8[11],ymm6[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[3,3,3,3]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm9 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm9, %ymm17, %ymm9
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm10 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm0[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm10[0],ymm0[1],ymm10[1],ymm0[2],ymm10[2],ymm0[3],ymm10[3],ymm0[8],ymm10[8],ymm0[9],ymm10[9],ymm0[10],ymm10[10],ymm0[11],ymm10[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm27
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm9, %zmm0, %zmm27 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, %zmm29
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r8), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm0, %zmm20, %zmm29
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm1[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm19, %ymm27
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r9), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm31
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm22
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rsi), %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %xmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm0, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm5
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %xmm1
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm6, %ymm31, %ymm6
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm8
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [1,0,2,2,1,0,2,2]
+; AVX512DQ-FAST-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm14, %xmm0
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm14[0],xmm7[0],xmm14[1],xmm7[1],xmm14[2],xmm7[2],xmm14[3],xmm7[3]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm9, %ymm10, %ymm9
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm14 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm12 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm1, %xmm24
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm12[0],xmm7[0],xmm12[1],xmm7[1],xmm12[2],xmm7[2],xmm12[3],xmm7[3]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm7, %ymm7
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm29
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm7, %zmm6, %zmm29 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm29, %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %xmm0
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm8, %zmm25
+; AVX512DQ-FAST-NEXT:    movw $9362, %ax # imm = 0x2492
+; AVX512DQ-FAST-NEXT:    kmovw %eax, %k2
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm7, %zmm9, %zmm25 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [16,9,10,17,12,13,18,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm26
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r8), %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[2,1,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm2, %zmm18, %zmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm7
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm7, %xmm4
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm6
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm15, %ymm31, %ymm15
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm14 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm0[2,1,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm6, %zmm16, %zmm26
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rsi), %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm14
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm14[4],xmm6[4],xmm14[5],xmm6[5],xmm14[6],xmm6[6],xmm14[7],xmm6[7]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm28, %xmm2
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm1, %ymm10, %ymm1
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm14, %zmm31
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm15, %zmm31 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %xmm15
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm15[2,1,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm0, %zmm31, %zmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm4
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm10, %ymm0
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm0[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm2
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm2
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm27 = ymm2[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm5, %ymm2
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm11[0],ymm3[0],ymm11[1],ymm3[1],ymm11[2],ymm3[2],ymm11[3],ymm3[3],ymm11[8],ymm3[8],ymm11[9],ymm3[9],ymm11[10],ymm3[10],ymm11[11],ymm3[11]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <17,18,17,18,u,u,19,19,5,4,2,2,5,4,6,6>
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm0, %zmm11, %zmm5
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,1,1,1]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm9[0],ymm13[0],ymm9[1],ymm13[1],ymm9[2],ymm13[2],ymm9[3],ymm13[3],ymm9[8],ymm13[8],ymm9[9],ymm13[9],ymm9[10],ymm13[10],ymm9[11],ymm13[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm0, %zmm5 {%k1}
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm8[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [8,9,20,11,12,21,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm6
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm0, %zmm4, %zmm6
-; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm19 = xmm15[0],zero,xmm15[1],zero,xmm15[2],zero,xmm15[3],zero
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm15[12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [0,9,2,3,8,5,6,11]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm16, %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsp), %xmm0 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm14, %zmm11
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm1, %zmm11 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm28
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r8), %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,1,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm0, %zmm16, %zmm28
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm2, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm3, %ymm1
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[3,3,3,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm2
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm13[4],ymm2[5],ymm13[5],ymm2[6],ymm13[6],ymm2[7],ymm13[7],ymm2[12],ymm13[12],ymm2[13],ymm13[13],ymm2[14],ymm13[14],ymm2[15],ymm13[15]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm5, %ymm17, %ymm5
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm13[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm6 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm6[0],ymm4[0],ymm6[1],ymm4[1],ymm6[2],ymm4[2],ymm6[3],ymm4[3],ymm6[8],ymm4[8],ymm6[9],ymm4[9],ymm6[10],ymm4[10],ymm6[11],ymm4[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm13
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm5, %zmm4, %zmm13 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm0, %zmm20, %zmm4
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm1[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm19, %ymm13
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm14
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm0[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm9
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm24 = ymm0[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %ymm3
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm3, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm2, %ymm5
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[8],ymm0[8],ymm5[9],ymm0[9],ymm5[10],ymm0[10],ymm5[11],ymm0[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[3,3,3,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm3 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[2],mem[2],ymm1[3],mem[3],ymm1[8],mem[8],ymm1[9],mem[9],ymm1[10],mem[10],ymm1[11],mem[11]
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm0, %zmm11, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # xmm0 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm0[1,1,1,1]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm0[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm9 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # xmm9 = xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11]
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm9, %zmm11, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm8[4],ymm1[4],ymm8[5],ymm1[5],ymm8[6],ymm1[6],ymm8[7],ymm1[7],ymm8[12],ymm1[12],ymm8[13],ymm1[13],ymm8[14],ymm1[14],ymm8[15],ymm1[15]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm6, %ymm17, %ymm6
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm7 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm15 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm15[0],ymm7[0],ymm15[1],ymm7[1],ymm15[2],ymm7[2],ymm15[3],ymm7[3],ymm15[8],ymm7[8],ymm15[9],ymm7[9],ymm15[10],ymm7[10],ymm15[11],ymm7[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm6, %zmm7, %zmm0 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm15
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm15, %ymm5
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm5, %zmm0, %zmm20
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm15[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm5, %ymm19, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm5
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm5[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm1, %ymm5
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm5[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm6, %xmm7
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm5, %xmm14
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm14[4],xmm7[4],xmm14[5],xmm7[5],xmm14[6],xmm7[6],xmm14[7],xmm7[7]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm7[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm18, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rsp), %xmm8 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm12, %ymm10, %ymm12
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm9 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm7, %ymm7
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm14, %zmm17
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm7, %zmm12, %zmm17 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm18
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %xmm1
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm1[2,1,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm1, %xmm30
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm9, %zmm16, %zmm18
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %xmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm14
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm9, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm14, %xmm12
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm12[4],xmm1[4],xmm12[5],xmm1[5],xmm12[6],xmm1[6],xmm12[7],xmm1[7]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm12, %ymm10, %ymm12
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm10 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm14[0],xmm9[0],xmm14[1],xmm9[1],xmm14[2],xmm9[2],xmm14[3],xmm9[3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm8, %ymm8
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm10, %zmm10
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm8, %zmm12, %zmm10 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %xmm12
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm12[2,1,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm1, %zmm10, %zmm16
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm14[4],xmm9[4],xmm14[5],xmm9[5],xmm14[6],xmm9[6],xmm14[7],xmm9[7]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm1[1,1,1,1]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm1[2,2,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm9 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # xmm9 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[1,1,1,1]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # xmm1 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm1[1,1,1,1]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm14 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm14 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[2],mem[2],ymm1[3],mem[3],ymm1[8],mem[8],ymm1[9],mem[9],ymm1[10],mem[10],ymm1[11],mem[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm1
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm13 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # xmm13 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm8 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm8 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[2],mem[2],ymm1[3],mem[3],ymm1[8],mem[8],ymm1[9],mem[9],ymm1[10],mem[10],ymm1[11],mem[11]
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm13, %zmm11, %zmm8
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm1 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[2],mem[2],ymm1[3],mem[3],ymm1[8],mem[8],ymm1[9],mem[9],ymm1[10],mem[10],ymm1[11],mem[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm1[2,2,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm11 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # xmm11 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[1,1,1,1]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # xmm1 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm1[1,1,1,1]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm13 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm13 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[2],mem[2],ymm1[3],mem[3],ymm1[8],mem[8],ymm1[9],mem[9],ymm1[10],mem[10],ymm1[11],mem[11]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm30 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm30 = zmm30[0,1,2,3],mem[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm3 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[2],mem[2],ymm1[3],mem[3],ymm1[8],mem[8],ymm1[9],mem[9],ymm1[10],mem[10],ymm1[11],mem[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm21[0,1,2,3],zmm23[0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm24 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm21 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm30, %zmm1, %zmm24
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21, %zmm21 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm21 = zmm21[0,1,2,3],mem[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm30 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm21, %zmm1, %zmm30
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm25[0,1,2,3],zmm26[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm12, %zmm25
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm21, %zmm1, %zmm25
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm28[0,1,2,3],zmm20[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm27, %zmm21
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm20, %zmm1, %zmm21
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm10[0],zero,xmm10[1],zero,xmm10[2],zero,xmm10[3],zero
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [0,1,8,3,4,9,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm20, %ymm22
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm23, %zmm1, %zmm21
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm27 = zmm27[0,1,2,3],zmm29[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm23 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm27, %zmm1, %zmm23
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm4[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm31, %zmm24
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm1, %zmm24
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm0[0,1,2,3],zmm20[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm22, %zmm19
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm1, %zmm19
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [0,1,8,3,4,9,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm13, %ymm25
+; AVX512DQ-FAST-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm13, %ymm11
+; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm30[0],zero,xmm30[1],zero,xmm30[2],zero,xmm30[3],zero
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm13, %ymm17
 ; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm12[0],zero,xmm12[1],zero,xmm12[2],zero,xmm12[3],zero
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm20, %ymm23
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm20, %ymm29
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm19, %ymm20, %ymm31
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm7, %zmm15, %zmm3 {%k1}
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm14, %zmm9, %zmm0 {%k1}
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm13[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm11, %zmm8 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r9), %xmm1
-; AVX512DQ-FAST-NEXT:    vpshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm7 = mem[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm9
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm7, %zmm4, %zmm9
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm1[0,0,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm7, %ymm19
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm10, %xmm11
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm11, %ymm16, %ymm3
-; AVX512DQ-FAST-NEXT:    vpshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm11 = mem[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, %zmm13
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm11, %zmm4, %zmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm12, %xmm14
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm14, %ymm16, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm1, %xmm14
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm14[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vpshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm15 = mem[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm15, %zmm8, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r9), %xmm15
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm2, %xmm7
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm15[0,0,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm12, %ymm12
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm7, %ymm16, %ymm8
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm15, %xmm7
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %xmm14
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm5[0,1,2,3],zmm6[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm14[0,0,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm6, %ymm6
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm9[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm14, %xmm9
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm26 = zmm0[0,1,2,3],zmm13[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %xmm13
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm13, %xmm11
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm8[0,1,2,3],zmm4[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm15, %xmm15
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm14, %xmm14
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm13, %xmm8
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,0,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm13, %ymm13
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm13, %ymm10
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <1,2,1,2,u,u,3,3,13,12,10,10,13,12,14,14>
+; AVX512DQ-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm13 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm14, %zmm9, %zmm13 {%k1}
+; AVX512DQ-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm9 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm2, %zmm8, %zmm9 {%k1}
+; AVX512DQ-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm2 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm3, %zmm7, %zmm2 {%k1}
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm15[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [8,9,20,11,12,21,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm8
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm3, %zmm7, %zmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm12, %xmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [0,9,2,3,8,5,6,11]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm12, %ymm20, %ymm13
+; AVX512DQ-FAST-NEXT:    vpshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm12 = mem[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm15
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm12, %zmm7, %zmm15
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm25[0,1,2,3],zmm26[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm3, %xmm26
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm4, %ymm20, %ymm9
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm5 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11]
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r9), %xmm6
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[0,0,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm6, %xmm3
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm27
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm12, %zmm25, %zmm27
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm11[0,1,2,3],zmm28[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[1,1,1,1]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm5, %zmm4, %zmm1 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r9), %xmm4
+; AVX512DQ-FAST-NEXT:    vpshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm5 = mem[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm12
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm7, %zmm12
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm4[0,0,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm5, %ymm5
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm11
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm10 = mem[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm5 = mem[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm2 = mem[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm0 = mem[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm22 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm22 = zmm22[0,1,2,3],mem[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm19, %zmm19
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm22, %zmm20, %zmm19
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm22 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm22 = zmm23[0,1,2,3],mem[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm12, %zmm7
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm22, %zmm20, %zmm7
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm29[0,1,2,3],zmm17[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm6, %zmm6
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm12, %zmm20, %zmm6
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm31[0,1,2,3],zmm18[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm13, %zmm11
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm9, %zmm20, %zmm11
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm16, %zmm9, %zmm8
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm1
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm9, %zmm1
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm15, %zmm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm26, %zmm9, %zmm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm14, %zmm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm9, %zmm0
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm5, %zmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm26, %xmm28
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm26, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm11
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm11, %ymm20, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %xmm11
+; AVX512DQ-FAST-NEXT:    vpshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm3 = mem[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm3, %zmm1, %zmm7
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm11[0,0,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm3, %ymm26
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm30, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm28, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %xmm0
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm3, %ymm20, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm3
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm3[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm14
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm6, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm11, %xmm11
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm28 = ymm3[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm3 = mem[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm29 = ymm6[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm6 = mem[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm4[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm4 = mem[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm11[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm11 = mem[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm22, %zmm25, %zmm5
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm17[0,1,2,3],zmm18[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm26, %zmm18
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm17, %zmm25, %zmm18
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm10[0,1,2,3],zmm16[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm10, %zmm25, %zmm0
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm13[0,1,2,3],zmm8[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm28, %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm8, %zmm10, %zmm3
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm9[0,1,2,3],zmm15[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm29, %zmm6
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm8, %zmm10, %zmm6
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm12[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm30, %zmm4
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm10, %zmm4
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm7[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm31, %zmm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm10, %zmm2
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, 256(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 448(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 640(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, 64(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, (%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 192(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, 128(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, 320(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 384(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, 576(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm30, 512(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, 704(%rax)
-; AVX512DQ-FAST-NEXT:    addq $1224, %rsp # imm = 0x4C8
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 256(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 448(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 640(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, 64(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, 192(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, 128(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, 320(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, 384(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, 576(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, 512(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, 704(%rax)
+; AVX512DQ-FAST-NEXT:    addq $920, %rsp # imm = 0x398
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
index ff3279733322b..0b578bdd28389 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
@@ -89,10 +89,10 @@ define void @store_i16_stride7_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],mem[0],xmm2[1],mem[1]
 ; AVX2-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm2
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,ymm0[2,3],zero,zero,zero,zero,zero,zero,ymm0[18,19,22,23,26,27],zero,zero,zero,zero
+; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,4,5,8,9],zero,zero,ymm0[22,23,26,27,30,31],zero,zero,zero,zero,zero,zero,ymm0[24,25,20,21]
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,ymm2[2,3],zero,zero,zero,zero,zero,zero,ymm2[18,19,22,23,26,27],zero,zero,zero,zero
 ; AVX2-ONLY-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX2-ONLY-NEXT:    vpextrd $2, %xmm1, 24(%rax)
@@ -114,10 +114,10 @@ define void @store_i16_stride7_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512F-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],mem[0],xmm2[1],mem[1]
 ; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm2
-; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,ymm0[2,3],zero,zero,zero,zero,zero,zero,ymm0[18,19,22,23,26,27,u,u,u,u]
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,4,5,8,9],zero,zero,ymm0[22,23,26,27,30,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,ymm2[2,3],zero,zero,zero,zero,zero,zero,ymm2[18,19,22,23,26,27,u,u,u,u]
 ; AVX512F-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX512F-NEXT:    vpextrd $2, %xmm1, 24(%rax)
@@ -139,12 +139,13 @@ define void @store_i16_stride7_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512BW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],mem[0],xmm2[1],mem[1]
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,2,4,6,16,18,20,1,3,5,7,17,19,21,u,u>
-; AVX512BW-NEXT:    vpermi2w %ymm1, %ymm0, %ymm2
-; AVX512BW-NEXT:    vextracti128 $1, %ymm2, %xmm0
-; AVX512BW-NEXT:    vpextrd $2, %xmm0, 24(%rax)
-; AVX512BW-NEXT:    vmovq %xmm0, 16(%rax)
-; AVX512BW-NEXT:    vmovdqa %xmm2, (%rax)
+; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,2,4,6,8,10,12,1,3,5,7,9,11,13,u,u>
+; AVX512BW-NEXT:    vpermw %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512BW-NEXT:    vpextrd $2, %xmm1, 24(%rax)
+; AVX512BW-NEXT:    vmovq %xmm1, 16(%rax)
+; AVX512BW-NEXT:    vmovdqa %xmm0, (%rax)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %in.vec0 = load <2 x i16>, ptr %in.vecptr0, align 64
@@ -352,22 +353,22 @@ define void @store_i16_stride7_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX2-FAST-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
 ; AVX2-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm4
-; AVX2-FAST-NEXT:    vmovq {{.*#+}} xmm5 = mem[0],zero
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm5[0],ymm0[2],ymm5[2]
+; AVX2-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <5,7,1,3,7,u,u,u>
-; AVX2-FAST-NEXT:    vpermd %ymm4, %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vpermd %ymm2, %ymm1, %ymm1
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5],zero,zero,zero,zero,zero,zero,ymm1[10,11,14,15,2,3,18,19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [1,3,5,7,1,3,5,7]
-; AVX2-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX2-FAST-NEXT:    vpermd %ymm0, %ymm5, %ymm5
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,ymm5[0,1,4,5,8,9],zero,zero,zero,zero,zero,zero,zero,zero,ymm5[18,19,22,23,26,27],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-FAST-NEXT:    vpor %ymm5, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [1,3,5,7,1,3,5,7]
+; AVX2-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX2-FAST-NEXT:    vpermd %ymm0, %ymm3, %ymm3
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,ymm3[0,1,4,5,8,9],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[18,19,22,23,26,27],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-FAST-NEXT:    vpor %ymm3, %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm2[0,1,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[2,3],zero,zero,ymm2[18,19,26,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,ymm2[0,1,8,9,4,5,6,7,4,5],zero,zero,ymm2[26,27],zero,zero,zero,zero,ymm2[24,25,20,21,22,23,20,21,28,29]
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm4[0,1,8,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[2,3],zero,zero,ymm4[18,19,26,27],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX2-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
 ; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,2,4,6,0,2,4,6]
 ; AVX2-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
@@ -478,28 +479,28 @@ define void @store_i16_stride7_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX512F-FAST-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
 ; AVX512F-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm4
-; AVX512F-FAST-NEXT:    vmovq {{.*#+}} xmm5 = mem[0],zero
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX512F-FAST-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,2,4,u>
-; AVX512F-FAST-NEXT:    vpermi2q %ymm5, %ymm0, %ymm1
+; AVX512F-FAST-NEXT:    vpermi2q %ymm3, %ymm0, %ymm1
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [0,2,4,6,0,2,4,6]
 ; AVX512F-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,1,4,5,8,9],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[18,19,22,23,26,27],zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [1,3,5,7,1,3,5,7]
-; AVX512F-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm5, %ymm1
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [1,3,5,7,1,3,5,7]
+; AVX512F-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm3, %ymm1
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,ymm1[0,1,4,5,8,9],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[18,19,22,23,26,27],zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm4[0,1,8,9],zero,zero,zero,zero,ymm4[u,u,u,u,u,u,2,3],zero,zero,ymm4[18,19,26,27,u,u,u,u,u,u],zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,ymm2[0,1,8,9,u,u,u,u,u,u],zero,zero,ymm2[26,27],zero,zero,zero,zero,ymm2[u,u,u,u,u,u,20,21,28,29]
-; AVX512F-FAST-NEXT:    vpternlogq $168, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,1,8,9],zero,zero,zero,zero,ymm2[u,u,u,u,u,u,2,3],zero,zero,ymm2[18,19,26,27,u,u,u,u,u,u],zero,zero,zero,zero
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm2[2,3,0,1]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,ymm3[0,1,8,9,u,u,u,u,u,u],zero,zero,ymm3[26,27],zero,zero,zero,zero,ymm3[u,u,u,u,u,u,20,21,28,29]
+; AVX512F-FAST-NEXT:    vpternlogq $168, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <5,7,1,3,7,u,u,u>
-; AVX512F-FAST-NEXT:    vpermd %ymm4, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vpermd %ymm2, %ymm1, %ymm1
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,4,5],zero,zero,zero,zero,zero,zero,ymm1[10,11,14,15,2,3,18,19],zero,zero,zero,zero,zero,zero,ymm1[u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
 ; AVX512F-FAST-NEXT:    vporq %zmm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vextracti32x4 $2, %zmm0, 32(%rax)
 ; AVX512F-FAST-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
@@ -550,8 +551,9 @@ define void @store_i16_stride7_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,2,4,u>
 ; AVX512BW-FAST-NEXT:    vpermi2q %ymm3, %ymm0, %ymm1
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,4,8,12,32,36,40,1,5,9,13,33,37,41,2,6,10,14,34,38,42,3,7,11,15,35,39,43,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpermi2w %zmm1, %zmm2, %zmm0
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm0
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,4,8,12,16,20,24,1,5,9,13,17,21,25,2,6,10,14,18,22,26,3,7,11,15,19,23,27,u,u,u,u>
+; AVX512BW-FAST-NEXT:    vpermw %zmm0, %zmm1, %zmm0
 ; AVX512BW-FAST-NEXT:    vextracti32x4 $2, %zmm0, 32(%rax)
 ; AVX512BW-FAST-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
 ; AVX512BW-FAST-NEXT:    vmovq %xmm1, 48(%rax)
@@ -739,7 +741,7 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX1-ONLY-NEXT:    vpsrld $16, %xmm4, %xmm5
 ; AVX1-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[2,2,2,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,5,4]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm6[0],xmm5[1,2],xmm6[3,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
@@ -792,7 +794,7 @@ define void @store_i16_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm9 = [65535,65535,65535,0,0,0,0,65535,65535,65535,0,0,0,0,65535,65535]
 ; AVX1-ONLY-NEXT:    vandnps %ymm8, %ymm9, %ymm8
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm10 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[1,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm10[2,2,2,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,5,4]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm1[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm10 = xmm10[0],xmm11[1],xmm10[2,3,4,5,6,7]
@@ -1569,7 +1571,7 @@ define void @store_i16_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vpsrld $16, %xmm8, %xmm9
 ; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm3[2],xmm9[2],xmm3[3],xmm9[3]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm11 = xmm8[4],xmm3[4],xmm8[5],xmm3[5],xmm8[6],xmm3[6],xmm8[7],xmm3[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[1,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[2,2,2,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,5,4]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm9, %ymm11, %ymm9
 ; AVX1-ONLY-NEXT:    vandnps %ymm9, %ymm6, %ymm6
@@ -1709,7 +1711,7 @@ define void @store_i16_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vandps %ymm3, %ymm2, %ymm2
 ; AVX1-ONLY-NEXT:    vorps %ymm1, %ymm2, %ymm1
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm5[4],xmm11[4],xmm5[5],xmm11[5],xmm5[6],xmm11[6],xmm5[7],xmm11[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[2,2,2,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,4]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm12[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3,4,5,6,7]
@@ -2470,90 +2472,92 @@ define void @store_i16_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,u,14,15],zero,zero,ymm2[u,u,u,u,u,u,u,u,u,u,16,17],zero,zero,ymm2[u,u,u,u,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpor %ymm7, %ymm10, %ymm7
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm10
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm12
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm11 = xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
-; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm11[0,2,3,3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,2,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm11, %zmm14
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm14
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm11
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[0,2,3,3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,2,1]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm12, %zmm15
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm15
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm1[u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[14,15,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[16,17,u,u,u,u]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,14,15],zero,zero,ymm0[u,u,u,u,u,u,u,u,u,u,16,17],zero,zero,ymm0[u,u,u,u]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm17
 ; AVX512F-FAST-NEXT:    vpor %ymm6, %ymm7, %ymm6
-; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm11
+; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm12
 ; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm13
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm13[4],xmm11[4],xmm13[5],xmm11[5],xmm13[6],xmm11[6],xmm13[7],xmm11[7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm7, %xmm7
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm7, %xmm7
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm0, %xmm18
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,1,1]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm16
 ; AVX512F-FAST-NEXT:    vpbroadcastd 8(%r10), %ymm6
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
 ; AVX512F-FAST-NEXT:    vpandn %ymm6, %ymm7, %ymm6
 ; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm7[12,13,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm7[14,15,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm7[16,17,u,u]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm6, %zmm6
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm7[12,13,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm7[14,15,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm7[16,17,u,u]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm6, %zmm6
 ; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm6
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm6
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm10[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm12[1,1,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm14[0],xmm0[1],xmm14[2,3],xmm0[4],xmm14[5,6],xmm0[7]
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [0,0,1,1,8,8,9,9]
-; AVX512F-FAST-NEXT:    vpermi2q %zmm0, %zmm14, %zmm15
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
-; AVX512F-FAST-NEXT:    vprold $16, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[1,1,2,3]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1],xmm9[2],xmm8[3,4],xmm9[5],xmm8[6,7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm15, %zmm0
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm8[0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm15, %zmm6
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,1,3]
+; AVX512F-FAST-NEXT:    vprold $16, %xmm9, %xmm15
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm8[1,1,2,3]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm15[2],xmm0[3,4],xmm15[5],xmm0[6,7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm14, %zmm0
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[1,1,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm10 = xmm11[0],xmm10[1],xmm11[2,3],xmm10[4],xmm11[5,6],xmm10[7]
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm18, %xmm9
+; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm8, %xmm8
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm8 = zmm8[0,0,1,1,4,4,5,5]
+; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm8
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm0[0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,1,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm9
-; AVX512F-FAST-NEXT:    vpbroadcastd (%r10), %ymm8
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm9, %zmm0
+; AVX512F-FAST-NEXT:    vpbroadcastd (%r10), %ymm9
 ; AVX512F-FAST-NEXT:    vpbroadcastd 4(%r10), %ymm10
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm8
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm8
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm0 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm9[0,1],ymm0[2],ymm9[3,4],ymm0[5],ymm9[6,7,8,9],ymm0[10],ymm9[11,12],ymm0[13],ymm9[14,15]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm3[1,1,1,1,5,5,5,5]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,u,u,20,21,24,25,u,u,22,23,22,23]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm10[0,1],ymm9[2],ymm10[3,4],ymm9[5],ymm10[6,7,8,9],ymm9[10],ymm10[11,12],ymm9[13],ymm10[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [2,2,2,3,10,10,10,11]
-; AVX512F-FAST-NEXT:    vpermi2q %zmm0, %zmm9, %zmm10
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm9
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm4[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm2[0,1,1,3,4,5,5,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm9[0,1],ymm0[2],ymm9[3,4],ymm0[5],ymm9[6,7,8,9],ymm0[10],ymm9[11,12],ymm0[13],ymm9[14,15]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm2[0,1,1,3,4,5,5,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0,1],ymm0[2],ymm8[3,4],ymm0[5],ymm8[6,7,8,9],ymm0[10],ymm8[11,12],ymm0[13],ymm8[14,15]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,3,2]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,u,u,u,u,26,27,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm3[2,2,2,2,6,6,6,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm11[0],ymm9[1],ymm11[2,3],ymm9[4],ymm11[5,6,7,8],ymm9[9],ymm11[10,11],ymm9[12],ymm11[13,14,15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm0
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm17[0,0,2,1,4,4,6,5]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,u,u,u,u,26,27,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm10 = ymm3[2,2,2,2,6,6,6,6]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm10[0],ymm8[1],ymm10[2,3],ymm8[4],ymm10[5,6,7,8],ymm8[9],ymm10[10,11],ymm8[12],ymm10[13,14,15]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,2,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm2[2,2,2,2,6,6,6,6]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm10[0,1],ymm8[2],ymm10[3,4],ymm8[5],ymm10[6,7,8,9],ymm8[10],ymm10[11,12],ymm8[13],ymm10[14,15]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm10 = ymm3[1,1,1,1,5,5,5,5]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,u,u,20,21,24,25,u,u,22,23,22,23]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm11[0,1],ymm10[2],ymm11[3,4],ymm10[5],ymm11[6,7,8,9],ymm10[10],ymm11[11,12],ymm10[13],ymm11[14,15]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm10, %zmm8
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm8 = zmm8[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm8
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm0 = ymm17[0,0,2,1,4,4,6,5]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,u,u,18,19,20,21,u,u,20,21]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm9 = ymm10[0,1,2],ymm9[3],ymm10[4,5],ymm9[6],ymm10[7,8,9,10],ymm9[11],ymm10[12,13],ymm9[14],ymm10[15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm10[0,1,2],ymm0[3],ymm10[4,5],ymm0[6],ymm10[7,8,9,10],ymm0[11],ymm10[12,13],ymm0[14],ymm10[15]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vprold $16, %ymm1, %ymm10
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm17[1,2,2,3,5,6,6,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm11[0,1],ymm10[2],ymm11[3,4],ymm10[5],ymm11[6,7,8,9],ymm10[10],ymm11[11,12],ymm10[13],ymm11[14,15]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,1,3,2]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [0,13,4,0,0,14,5,0,0,13,4,0,0,14,5,0]
 ; AVX512F-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-FAST-NEXT:    vpermd %zmm7, %zmm10, %zmm10
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm10
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm10
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm10
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,14,15,u,u,u,u,u,u,u,u,28,29,u,u,u,u,30,31,u,u]
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[3,3,3,3,7,7,7,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7,8,9,10],ymm0[11],ymm3[12,13],ymm0[14],ymm3[15]
@@ -2573,7 +2577,7 @@ define void @store_i16_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm1
 ; AVX512F-FAST-NEXT:    vmovdqa %ymm1, 192(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, 128(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, (%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, (%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, 64(%rax)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
@@ -3717,7 +3721,7 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm1 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[2,2,2,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,5,4]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm13[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm7[1],xmm3[2,3,4,5,6,7]
@@ -3746,7 +3750,7 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[2,2,2,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,5,4]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm5[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm7[1],xmm3[2,3,4,5,6,7]
@@ -3808,7 +3812,7 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[2,2,2,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,5,4]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm11 = xmm10[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm11[1],xmm5[2,3,4,5,6,7]
@@ -4978,339 +4982,351 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-SLOW-LABEL: store_i16_stride7_vf32:
 ; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    subq $648, %rsp # imm = 0x288
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm3
+; AVX512F-SLOW-NEXT:    subq $792, %rsp # imm = 0x318
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm1, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm20
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm2
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm3, %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm19
-; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm2, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm2, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm26
+; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm2, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm21
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm3, %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm21
-; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm2, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm3, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm22
+; AVX512F-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %ymm14
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm4, %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm9
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm9, %ymm3
-; AVX512F-SLOW-NEXT:    vporq %ymm2, %ymm3, %ymm18
-; AVX512F-SLOW-NEXT:    vmovdqa64 (%r9), %xmm17
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm10
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm10, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm8
-; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm8, %ymm5
-; AVX512F-SLOW-NEXT:    vpor %ymm3, %ymm5, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm12
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm12, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm11
-; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm11, %ymm3
-; AVX512F-SLOW-NEXT:    vpor %ymm1, %ymm3, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %ymm14
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm14, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm15
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm15, %ymm0
-; AVX512F-SLOW-NEXT:    vporq %ymm1, %ymm0, %ymm16
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm7
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,7,6]
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm15
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm15, %ymm2
+; AVX512F-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm11
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm10
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm10, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm12
+; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm12, %ymm1
+; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm5
+; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm5, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm4
+; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm4, %ymm2
+; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %ymm13
+; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm13, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm7
+; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm7, %ymm2
+; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm11[0],xmm3[0],xmm11[1],xmm3[1],xmm11[2],xmm3[2],xmm11[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm3, %xmm6
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5,7,6]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,1,0,1,0,1,1,3,16,18,19,19,19,19,u,u>
-; AVX512F-SLOW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm3
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm9[3,3,3,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm4[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,3,6,6,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [4,5,4,5,4,5,6,7,16,17,16,17,16,17,17,19]
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %xmm1
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm3[0,1,3,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermi2d %zmm6, %zmm5, %zmm13
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm7[4],xmm2[5],xmm7[5],xmm2[6],xmm7[6],xmm2[7],xmm7[7]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <0,0,1,1,8,9,u,11>
-; AVX512F-SLOW-NEXT:    vpermi2q %zmm18, %zmm2, %zmm5
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,1,0,1,0,1,1,3,16,18,19,19,19,19,u,u>
+; AVX512F-SLOW-NEXT:    vpermi2d %zmm2, %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm15[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm14[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,3,6,6,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7,8,9],ymm0[10],ymm2[11,12],ymm0[13],ymm2[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [4,5,4,5,4,5,6,7,16,17,16,17,16,17,17,19]
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %xmm0
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm9[0,1,3,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpermi2d %zmm3, %zmm2, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm9[0,1,2,3,4,5,7,6]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm17
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <16,18,19,19,19,19,u,u,0,1,0,1,2,3,2,3>
+; AVX512F-SLOW-NEXT:    vpermi2d %zmm2, %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vprold $16, %ymm14, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm15[1,2,2,3,5,6,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7,8,9],ymm0[10],ymm2[11,12],ymm0[13],ymm2[14,15]
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,4,5,7,6]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <16,18,19,19,19,19,u,u,0,1,0,1,2,3,2,3>
-; AVX512F-SLOW-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm15[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm14[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,0,0,0,4,4,4,4]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7,8,9,10],ymm0[11],ymm3[12,13],ymm0[14],ymm3[15]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,2,3,3,10,9,11,10]
+; AVX512F-SLOW-NEXT:    vpermi2q %zmm2, %zmm3, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm0
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,2,3,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,2,1]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm5, %ymm0
-; AVX512F-SLOW-NEXT:    vpbroadcastd 8(%rax), %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %ymm15
+; AVX512F-SLOW-NEXT:    vpbroadcastd 8(%rax), %ymm2
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpandn %ymm1, %ymm3, %ymm1
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm27
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm10[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm8[1,1,1,1,5,5,5,5]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm31
-; AVX512F-SLOW-NEXT:    vprold $16, %ymm4, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm9[1,2,2,3,5,6,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm9[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,0,0,0,4,4,4,4]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3],ymm3[4,5],ymm1[6],ymm3[7,8,9,10],ymm1[11],ymm3[12,13],ymm1[14],ymm3[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [2,2,3,3,10,9,11,10]
-; AVX512F-SLOW-NEXT:    vpermi2q %zmm0, %zmm1, %zmm3
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm11[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm12[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm28
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm14[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm15[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8,9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = <0,1,u,3,10,10,11,11>
-; AVX512F-SLOW-NEXT:    vpermi2q %zmm0, %zmm16, %zmm26
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rax), %ymm4
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm4, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm4[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512F-SLOW-NEXT:    vpandn %ymm2, %ymm3, %ymm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm16
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm11[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm29
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm11[3,3,3,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm12[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6,7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm30
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm10[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm8[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm20
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm8[3,3,3,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm10[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7,8,9,10],ymm2[11],ymm1[12,13],ymm2[14],ymm1[15]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm15, %ymm9
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm2, %zmm2
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm11[4],xmm6[4],xmm11[5],xmm6[5],xmm11[6],xmm6[6],xmm11[7],xmm6[7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm10[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm12[1,1,1,1,5,5,5,5]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm9[0,1],ymm11[2],ymm9[3,4],ymm11[5],ymm9[6,7,8,9],ymm11[10],ymm9[11,12],ymm11[13],ymm9[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm31
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm5[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[0,0,0,0,4,4,4,4]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm4[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm11[0,1],ymm9[2],ymm11[3,4],ymm9[5],ymm11[6,7,8,9],ymm9[10],ymm11[11,12],ymm9[13],ymm11[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm29
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rax), %ymm11
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm11[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpandn %ymm9, %ymm14, %ymm9
+; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm11, %ymm3
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm3, %zmm2
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm13[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,0,0,0,4,4,4,4]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm7[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm3[0,1,2],ymm9[3],ymm3[4,5],ymm9[6],ymm3[7,8,9,10],ymm9[11],ymm3[12,13],ymm9[14],ymm3[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm30
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm4[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1],ymm14[2],ymm3[3,4],ymm14[5],ymm3[6,7,8,9],ymm14[10],ymm3[11,12],ymm14[13],ymm3[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm18
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm4[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm5[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6,7,8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm10[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm12[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7,8],ymm4[9],ymm5[10,11],ymm4[12],ymm5[13,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm19
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm12[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0,1,2],ymm10[3],ymm4[4,5],ymm10[6],ymm4[7,8,9,10],ymm10[11],ymm4[12,13],ymm10[14],ymm4[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm16
+; AVX512F-SLOW-NEXT:    vprold $16, %ymm13, %ymm4
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm7[1,2,2,3,5,6,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm12[0,1],ymm4[2],ymm12[3,4],ymm4[5],ymm12[6,7,8,9],ymm4[10],ymm12[11,12],ymm4[13],ymm12[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm27
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm7[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm13[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,2,2,3,6,6,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm7[0,1],ymm4[2],ymm7[3,4],ymm4[5],ymm7[6,7,8,9],ymm4[10],ymm7[11,12],ymm4[13],ymm7[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm7
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
+; AVX512F-SLOW-NEXT:    vprold $16, %xmm7, %xmm7
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm7[2],xmm4[3,4],xmm7[5],xmm4[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3],xmm1[4],xmm0[5,6],xmm1[7]
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vprold $16, %ymm14, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm15[1,2,2,3,5,6,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0,1],ymm1[2],ymm8[3,4],ymm1[5],ymm8[6,7,8,9],ymm1[10],ymm8[11,12],ymm1[13],ymm8[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm24
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm15[3,3,3,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm14[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[2,2,2,3,6,6,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm10[0,1],ymm1[2],ymm10[3,4],ymm1[5],ymm10[6,7,8,9],ymm1[10],ymm10[11,12],ymm1[13],ymm10[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm9
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm9[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm26[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm7[0],ymm0[1],ymm7[2,3],ymm0[4],ymm7[5,6,7,8],ymm0[9],ymm7[10,11],ymm0[12],ymm7[13,14,15]
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm10
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm10[4],xmm2[4],xmm10[5],xmm2[5],xmm10[6],xmm2[6],xmm10[7],xmm2[7]
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm10[0],xmm2[1],xmm10[1],xmm2[2],xmm10[2],xmm2[3],xmm10[3]
-; AVX512F-SLOW-NEXT:    vprold $16, %xmm10, %xmm10
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm10[2],xmm2[3,4],xmm10[5],xmm2[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm14
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm14[4],xmm2[4],xmm14[5],xmm2[5],xmm14[6],xmm2[6],xmm14[7],xmm2[7]
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm14[0],xmm2[0],xmm14[1],xmm2[1],xmm14[2],xmm2[2],xmm14[3],xmm2[3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm22
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm14[0],xmm2[1],xmm14[2,3],xmm2[4],xmm14[5,6],xmm2[7]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, (%rsp) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm7[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm11
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm19[3,3,3,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm15[0],ymm2[1],ymm15[2,3],ymm2[4],ymm15[5,6,7,8],ymm2[9],ymm15[10,11],ymm2[12],ymm15[13,14,15]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm8[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm21[3,3,3,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm12[0,1,2],ymm2[3],ymm12[4,5],ymm2[6],ymm12[7,8,9,10],ymm2[11],ymm12[12,13],ymm2[14],ymm12[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm25
-; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm0, %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm12
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm9
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm12[0],xmm9[0],xmm12[1],xmm9[1],xmm12[2],xmm9[2],xmm12[3],xmm9[3]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm10, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512F-SLOW-NEXT:    vprold $16, %xmm9, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm12[1,1,2,3]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm14[0,1],xmm0[2],xmm14[3,4],xmm0[5],xmm14[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm19
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm9[4],xmm12[4],xmm9[5],xmm12[5],xmm9[6],xmm12[6],xmm9[7],xmm12[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm18
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm12
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm12, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm10
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm10[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm22[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm12[0,1,2],ymm0[3],ymm12[4,5],ymm0[6],ymm12[7,8,9,10],ymm0[11],ymm12[12,13],ymm0[14],ymm12[15]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm6
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm2, %xmm12
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm7
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm0
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm2, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm23
+; AVX512F-SLOW-NEXT:    vprold $16, %xmm0, %xmm2
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm1[1,1,2,3]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm14[0,1],xmm2[2],xmm14[3,4],xmm2[5],xmm14[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm24
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm1
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm1, %xmm4
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm14
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm14[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0],xmm3[1],xmm13[2,3],xmm3[4],xmm13[5,6],xmm3[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm17
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm14[0],xmm12[0],xmm14[1],xmm12[1],xmm14[2],xmm12[2],xmm14[3],xmm12[3]
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm14[4],xmm12[4],xmm14[5],xmm12[5],xmm14[6],xmm12[6],xmm14[7],xmm12[7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm7[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0],xmm4[1],xmm13[2,3],xmm4[4],xmm13[5,6],xmm4[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm25
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm14[0],xmm1[0],xmm14[1],xmm1[1],xmm14[2],xmm1[2],xmm14[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm14[4],xmm1[4],xmm14[5],xmm1[5],xmm14[6],xmm1[6],xmm14[7],xmm1[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm21
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm0
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm0[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[0,0,0,0,4,4,4,4]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm11, %ymm2
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm11[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm14[2],ymm11[3,4],ymm14[5],ymm11[6,7,8,9],ymm14[10],ymm11[11,12],ymm14[13],ymm11[14,15]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm14[0,1],ymm15[2],ymm14[3,4],ymm15[5],ymm14[6,7,8,9],ymm15[10],ymm14[11,12],ymm15[13],ymm14[14,15]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm8[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm8, %ymm9
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm21[1,1,1,1,5,5,5,5]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm15[0,1],ymm7[2],ymm15[3,4],ymm7[5],ymm15[6,7,8,9],ymm7[10],ymm15[11,12],ymm7[13],ymm15[14,15]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,2,3,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[2,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,5,4]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,1,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm31[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[2,1,3,2]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm29[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm30[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm31 = ymm20[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm2 = mem[2,1,3,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm24[2,1,3,2]
-; AVX512F-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm3 = mem[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm21[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm9[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm10[1],ymm0[2,3],ymm10[4],ymm0[5,6,7,8],ymm10[9],ymm0[10,11],ymm10[12],ymm0[13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm4[0,1,2,2,4,5,6,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,3,3,3,6,7,7,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm26[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm9[0,1],ymm14[2],ymm9[3,4],ymm14[5],ymm9[6,7,8,9],ymm14[10],ymm9[11,12],ymm14[13],ymm9[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm28
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm26[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm14[0,1],ymm7[2],ymm14[3,4],ymm7[5],ymm14[6,7,8,9],ymm7[10],ymm14[11,12],ymm7[13],ymm14[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm20
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm10[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm10, %ymm7
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm22[1,1,1,1,5,5,5,5]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm14[0,1],ymm10[2],ymm14[3,4],ymm10[5],ymm14[6,7,8,9],ymm10[10],ymm14[11,12],ymm10[13],ymm14[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm17
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[2,1,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,5,4]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,1,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm21, %zmm5
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm27
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm27
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm1, %zmm1
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm28, %zmm5, %zmm5
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm5
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm26, %zmm16
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm16
-; AVX512F-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm1 = mem[0,0,2,1]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm5
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,3,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpermq $80, (%rsp), %ymm15 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm15 = mem[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm20 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm20 = mem[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm31[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm29[2,1,3,2]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm30[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm31 = ymm18[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm19[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm16[2,1,3,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[2,1,3,2]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm22[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm9 = ymm7[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm9[1],ymm0[2,3],ymm9[4],ymm0[5,6,7,8],ymm9[9],ymm0[10,11],ymm9[12],ymm0[13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm11[0,1,2,2,4,5,6,6]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm11[2,3,3,3,6,7,7,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm16 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm16, %zmm22, %zmm3
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm16 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm16[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm6
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm6
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm3, %zmm3
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm29, %zmm7, %zmm14
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm14
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm3
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm7[0,1,2,3],zmm3[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm3
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm3
+; AVX512F-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm14 = mem[0,0,2,1]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,1,3,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm16 = mem[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm18 = mem[0,2,2,3]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,1,3,2,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm25[2,1,3,3]
+; AVX512F-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm19 = mem[2,1,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm19[0,0,2,1]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm9
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[2,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm9 = xmm9[0,1,2,3,4,5,5,4]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,1,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm26 = ymm17[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[0,2,3,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,2,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,1,3,2]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[0,0,2,1]
+; AVX512F-SLOW-NEXT:    vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # xmm10 = mem[2,1,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,5,4]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,1,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm26 = ymm25[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm7
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[0,2,3,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[2,1,3,2]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm20[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm17[2,2,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm29, %zmm28
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm31, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm28, %zmm29, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm31 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm28 = ymm31[2,3,3,3,6,7,7,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm30 = ymm31[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm31 = ymm31[0,1,2,2,4,5,6,6]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm7, %zmm3
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm3
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm10[2,1,3,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,1,3,2]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm6, %zmm1
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm5[0,1,1,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm3, %zmm3
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm3
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm13[0,1,1,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm20, %zmm1
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm22, %zmm4
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm29, %zmm4
-; AVX512F-SLOW-NEXT:    vpbroadcastd (%rax), %ymm1
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm4, %zmm1
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm15[2,3,3,3,6,7,7,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm31 = ymm15[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[0,1,2,2,4,5,6,6]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm27, %zmm5
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm5
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm9[2,1,3,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm11[2,1,3,2]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm1, %zmm1
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm1
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm12, %zmm5
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,1,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm8, %zmm8
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm8
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm13[0,1,1,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm18, %zmm5
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm19, %zmm9
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm5, %zmm4, %zmm9
+; AVX512F-SLOW-NEXT:    vpbroadcastd (%rax), %ymm4
 ; AVX512F-SLOW-NEXT:    vpbroadcastd 4(%rax), %ymm5
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm1
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm1
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm1
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm28[2,1,3,2]
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm4
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm4
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,3,2]
 ; AVX512F-SLOW-NEXT:    vpbroadcastd 32(%rax), %ymm5
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm3
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm2
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm3
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm3
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm24, %zmm4
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm26, %zmm5
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm4, %zmm21, %zmm5
-; AVX512F-SLOW-NEXT:    vpbroadcastd 36(%rax), %ymm4
-; AVX512F-SLOW-NEXT:    vpbroadcastd 40(%rax), %ymm6
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm4
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm4
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm11, %zmm5
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm0
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm30[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm31[2,1,3,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm5
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm5
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm5
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm2
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm2
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm24, %zmm5
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm26, %zmm7
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm5, %zmm22, %zmm7
+; AVX512F-SLOW-NEXT:    vpbroadcastd 36(%rax), %ymm5
+; AVX512F-SLOW-NEXT:    vpbroadcastd 40(%rax), %ymm8
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm5
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm5
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm5
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm29, %zmm28, %zmm7
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm30, %zmm0
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm0
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm31[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm15[2,1,3,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm7
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm7
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm16, 320(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm5, 128(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, 256(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm3, 192(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm27, 64(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm1, (%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, 384(%rax)
-; AVX512F-SLOW-NEXT:    addq $648, %rsp # imm = 0x288
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm7, 128(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm3, 320(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm5, 256(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, 192(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm6, 64(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, (%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm1, 384(%rax)
+; AVX512F-SLOW-NEXT:    addq $792, %rsp # imm = 0x318
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
@@ -5323,53 +5339,53 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm1, %ymm1
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm2
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm2, %ymm2
 ; AVX512F-FAST-NEXT:    vpor %ymm1, %ymm2, %ymm1
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm1
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm6
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
-; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm3
+; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm3
+; AVX512F-FAST-NEXT:    vpor %ymm1, %ymm3, %ymm1
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm9
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm8, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm15
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm15, %ymm3
-; AVX512F-FAST-NEXT:    vporq %ymm2, %ymm3, %ymm19
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm9, %ymm3
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm10
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm10, %ymm4
+; AVX512F-FAST-NEXT:    vporq %ymm3, %ymm4, %ymm19
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm11
-; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm11, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %xmm7
+; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm6, %ymm4
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm12
-; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm12, %ymm4
-; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm12, %ymm2
+; AVX512F-FAST-NEXT:    vpor %ymm4, %ymm2, %ymm2
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm13
 ; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm13, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm14
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm14, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm14, %ymm2
 ; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm2, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %ymm9
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm9, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %ymm15
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm15, %ymm1
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm0, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm2
 ; AVX512F-FAST-NEXT:    vporq %ymm1, %ymm2, %ymm17
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,1,3,8,8,9,9]
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm6, %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm7, %xmm2
 ; AVX512F-FAST-NEXT:    vpermi2q %zmm1, %zmm2, %zmm4
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm1
@@ -5378,114 +5394,115 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm5
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [0,0,0,1,8,9,9,11]
-; AVX512F-FAST-NEXT:    vpermi2q %zmm5, %zmm4, %zmm10
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,0,0,1,8,9,9,11]
+; AVX512F-FAST-NEXT:    vpermi2q %zmm5, %zmm4, %zmm11
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,0,1,1,8,9,u,11>
-; AVX512F-FAST-NEXT:    vpermi2q %zmm19, %zmm1, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31>
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm6, %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm2
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm15[3,3,3,3,7,7,7,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm0, %zmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <0,0,1,1,12,13,u,15>
+; AVX512F-FAST-NEXT:    vpermi2q %zmm2, %zmm1, %zmm4
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31>
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm7, %xmm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm9, %ymm2
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm10[3,3,3,3,7,7,7,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7,8,9],ymm3[10],ymm2[11,12],ymm3[13],ymm2[14,15]
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [2,2,2,3,8,8,8,9]
 ; AVX512F-FAST-NEXT:    vpermi2q %zmm1, %zmm2, %zmm3
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vprold $16, %ymm8, %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm15[1,2,2,3,5,6,6,7]
+; AVX512F-FAST-NEXT:    vprold $16, %ymm9, %ymm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm10[1,2,2,3,5,6,6,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21>
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm8, %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm15[0,0,2,1,4,4,6,5]
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm9, %ymm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm10[0,0,2,1,4,4,6,5]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0,1,2],ymm3[3],ymm1[4,5],ymm3[6],ymm1[7,8,9,10],ymm3[11],ymm1[12,13],ymm3[14],ymm1[15]
-; AVX512F-FAST-NEXT:    vmovdqa (%rax), %ymm10
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm26 = [2,2,3,3,10,9,11,10]
-; AVX512F-FAST-NEXT:    vpermi2q %zmm2, %zmm3, %zmm26
+; AVX512F-FAST-NEXT:    vmovdqa (%rax), %ymm11
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [2,2,3,3,10,9,11,10]
+; AVX512F-FAST-NEXT:    vpermi2q %zmm2, %zmm3, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm10, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm11, %ymm3
 ; AVX512F-FAST-NEXT:    vpbroadcastd 8(%rax), %ymm4
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpandn %ymm4, %ymm6, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpandn %ymm4, %ymm7, %ymm4
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm28
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm12[1,1,1,1,5,5,5,5]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7,8,9],ymm4[10],ymm3[11,12],ymm4[13],ymm3[14,15]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm13[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm14[0,1,1,3,4,5,5,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm6[0,1],ymm3[2],ymm6[3,4],ymm3[5],ymm6[6,7,8,9],ymm3[10],ymm6[11,12],ymm3[13],ymm6[14,15]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm14[0,1,1,3,4,5,5,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm7[0,1],ymm3[2],ymm7[3,4],ymm3[5],ymm7[6,7,8,9],ymm3[10],ymm7[11,12],ymm3[13],ymm7[14,15]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm30
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm9, %ymm5
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm0[0,0,2,1,4,4,6,5]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7,8,9,10],ymm6[11],ymm5[12,13],ymm6[14],ymm5[15]
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm15, %ymm5
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm0[0,0,2,1,4,4,6,5]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1,2],ymm7[3],ymm5[4,5],ymm7[6],ymm5[7,8,9,10],ymm7[11],ymm5[12,13],ymm7[14],ymm5[15]
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <0,1,u,3,10,10,11,11>
 ; AVX512F-FAST-NEXT:    vpermi2q %zmm5, %zmm17, %zmm25
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,4,u,u,u,5,u>
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rax), %ymm8
-; AVX512F-FAST-NEXT:    vpermd %ymm8, %ymm5, %ymm5
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpandn %ymm5, %ymm6, %ymm5
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm8, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rax), %ymm9
+; AVX512F-FAST-NEXT:    vpermd %ymm9, %ymm5, %ymm5
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpandn %ymm5, %ymm7, %ymm5
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm9, %ymm2
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm17
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm11[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm12[2,2,2,2,6,6,6,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm6[0],ymm2[1],ymm6[2,3],ymm2[4],ymm6[5,6,7,8],ymm2[9],ymm6[10,11],ymm2[12],ymm6[13,14,15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm6[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm12[2,2,2,2,6,6,6,6]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm7[0],ymm2[1],ymm7[2,3],ymm2[4],ymm7[5,6,7,8],ymm2[9],ymm7[10,11],ymm2[12],ymm7[13,14,15]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm31
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm12[3,3,3,3,7,7,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm11[0,1,2],ymm2[3],ymm11[4,5],ymm2[6],ymm11[7,8,9,10],ymm2[11],ymm11[12,13],ymm2[14],ymm11[15]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27>
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm13, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm12[3,3,3,3,7,7,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm6[0,1,2],ymm2[3],ymm6[4,5],ymm2[6],ymm6[7,8,9,10],ymm2[11],ymm6[12,13],ymm2[14],ymm6[15]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm14[2,2,2,2,6,6,6,6]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm12[2],ymm2[3,4],ymm12[5],ymm2[6,7,8,9],ymm12[10],ymm2[11,12],ymm12[13],ymm2[14,15]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm13, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm13, %ymm2
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm14[3,3,3,3,7,7,7,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm13[0],ymm2[1],ymm13[2,3],ymm2[4],ymm13[5,6,7,8],ymm2[9],ymm13[10,11],ymm2[12],ymm13[13,14,15]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vprold $16, %ymm9, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm24
+; AVX512F-FAST-NEXT:    vprold $16, %ymm15, %ymm2
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm0[1,2,2,3,5,6,6,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm14[0,1],ymm2[2],ymm14[3,4],ymm2[5],ymm14[6,7,8,9],ymm2[10],ymm14[11,12],ymm2[13],ymm14[14,15]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm9, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm15, %ymm2
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[3,3,3,3,7,7,7,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3,4],ymm0[5],ymm2[6,7,8,9],ymm0[10],ymm2[11,12],ymm0[13],ymm2[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm22
 ; AVX512F-FAST-NEXT:    vmovdqa64 (%rax), %zmm27
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = <u,5,u,u,u,6,u,u,30,u,u,u,31,u,u,31>
-; AVX512F-FAST-NEXT:    vpermi2d %zmm27, %zmm8, %zmm29
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm8
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm9
-; AVX512F-FAST-NEXT:    vprold $16, %xmm9, %xmm2
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm7 = xmm8[1,1,2,3]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0,1],xmm2[2],xmm7[3,4],xmm2[5],xmm7[6,7]
+; AVX512F-FAST-NEXT:    vpermi2d %zmm27, %zmm9, %zmm29
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm9
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm10
+; AVX512F-FAST-NEXT:    vprold $16, %xmm10, %xmm2
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm9[1,1,2,3]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm8[0,1],xmm2[2],xmm8[3,4],xmm2[5],xmm8[6,7]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm0
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
 ; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm6
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
 ; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm5, %xmm2
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm5
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm2
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm5, %xmm3
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm12 = xmm2[1,1,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm12[0],xmm3[1],xmm12[2,3],xmm3[4],xmm12[5,6],xmm3[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm21
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm2[1,1,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm8[0],xmm3[1],xmm8[2,3],xmm3[4],xmm8[5,6],xmm3[7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm19
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; AVX512F-FAST-NEXT:    vprold $16, %xmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6,7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm22
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm21
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm1
 ; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm4
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm0
@@ -5493,46 +5510,45 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,2]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2,3],xmm4[4],xmm0[5,6],xmm4[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm24
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm4
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm0, %ymm4
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm7[3,3,3,3,7,7,7,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm14[0],ymm4[1],ymm14[2,3],ymm4[4],ymm14[5,6,7,8],ymm4[9],ymm14[10,11],ymm4[12],ymm14[13,14,15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm4, %ymm20
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm4, %ymm26
 ; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
 ; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm18
 ; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm2, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm20
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm2[3,3,3,3,7,7,7,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm15[0,1,2],ymm5[3],ymm15[4,5],ymm5[6],ymm15[7,8,9,10],ymm5[11],ymm15[12,13],ymm5[14],ymm15[15]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm4, %ymm16
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3]
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm12, %xmm9
 ; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm8, %xmm8
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm12 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm9, %xmm9
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm7[0,1,1,3,4,5,5,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm15[0,1],ymm12[2],ymm15[3,4],ymm12[5],ymm15[6,7,8,9],ymm12[10],ymm15[11,12],ymm12[13],ymm15[14,15]
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm0, %ymm15
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm10 = ymm15[0,1],ymm10[2],ymm15[3,4],ymm10[5],ymm15[6,7,8,9],ymm10[10],ymm15[11,12],ymm10[13],ymm15[14,15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm7[2,2,2,2,6,6,6,6]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm15[0,1],ymm13[2],ymm15[3,4],ymm13[5],ymm15[6,7,8,9],ymm13[10],ymm15[11,12],ymm13[13],ymm15[14,15]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm2[1,1,1,1,5,5,5,5]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm15[0,1],ymm7[2],ymm15[3,4],ymm7[5],ymm15[6,7,8,9],ymm7[10],ymm15[11,12],ymm7[13],ymm15[14,15]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm1[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm11 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0],ymm15[1],ymm11[2,3],ymm15[4],ymm11[5,6,7,8],ymm15[9],ymm11[10,11],ymm15[12],ymm11[13,14,15]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm2[2,2,2,2,6,6,6,6]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0],ymm15[1],ymm12[2,3],ymm15[4],ymm12[5,6,7,8],ymm15[9],ymm12[10,11],ymm15[12],ymm12[13,14,15]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <6,u,u,u,7,u,u,7>
-; AVX512F-FAST-NEXT:    vpermd %ymm10, %ymm15, %ymm15
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm27, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpermd %ymm11, %ymm15, %ymm15
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm27, %zmm11
 ; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,2,3,3,4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,1,3]
@@ -5540,19 +5556,18 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    # ymm27 = mem[2,2,2,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm30[2,1,3,2]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm31[0,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermq $246, (%rsp), %ymm2 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # ymm2 = mem[2,1,3,3]
-; AVX512F-FAST-NEXT:    vpermq $234, (%rsp), %ymm1 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # ymm1 = mem[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm0 = mem[0,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm24[0,2,2,3]
 ; AVX512F-FAST-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # ymm5 = mem[2,1,3,2]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm23[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm22[2,2,2,3]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm10, %zmm6
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm24, %zmm6
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm28
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm28
@@ -5567,21 +5582,21 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    # ymm3 = mem[0,0,2,1]
 ; AVX512F-FAST-NEXT:    vpermq $208, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # ymm6 = mem[0,0,1,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm21 = ymm21[0,0,1,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm19[0,0,1,1]
 ; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[0,2,3,3,4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,2,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,1,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm22[0,0,2,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm23 = ymm18[0,1,1,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[0,0,1,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm20[0,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm27 = ymm19[0,1,1,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm16[2,1,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,1,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,1,3,2]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm21 = ymm21[0,0,2,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm18[0,1,1,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[0,0,1,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm26[0,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm27 = ymm20[0,1,1,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm16[2,1,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,1,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,1,3,2]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,2,3]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm31, %zmm2
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
@@ -5590,19 +5605,19 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm29
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm3, %zmm0
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm21, %zmm2
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm10, %zmm2
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm19, %zmm2
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm24, %zmm2
 ; AVX512F-FAST-NEXT:    vpbroadcastd 36(%rax), %ymm0
 ; AVX512F-FAST-NEXT:    vpbroadcastd 40(%rax), %ymm3
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm0
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm22, %zmm9, %zmm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm23, %zmm3
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm8, %zmm2
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm23, %zmm22, %zmm3
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm3
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm27, %zmm25, %zmm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm30, %zmm4
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm30, %zmm4
 ; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm1, %zmm4
 ; AVX512F-FAST-NEXT:    vpbroadcastd (%rax), %ymm1
 ; AVX512F-FAST-NEXT:    vpbroadcastd 4(%rax), %ymm2
@@ -5615,12 +5630,13 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm2
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm7, %zmm4
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm10, %zmm3
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm7, %zmm4
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,u,4,u,u,u,5,u,u,13,u,u,u,14,u,u>
-; AVX512F-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm26, %zmm3
+; AVX512F-FAST-NEXT:    vpermd %zmm11, %zmm3, %zmm3
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm3
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm3
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm17, 320(%rax)
@@ -7971,7 +7987,7 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm13[4],xmm1[4],xmm13[5],xmm1[5],xmm13[6],xmm1[6],xmm13[7],xmm1[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm7[2,2,2,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,5,4]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm12 = xmm3[2,3,2,3]
@@ -8041,7 +8057,7 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm13 = xmm0[2],xmm13[2],xmm0[3],xmm13[3]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm13 = xmm2[4],xmm13[4],xmm2[5],xmm13[5],xmm2[6],xmm13[6],xmm2[7],xmm13[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[1,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm14[2,2,2,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,5,5,4]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm3 = xmm14[0],xmm3[1],xmm14[2,3,4,5,6,7]
@@ -8067,7 +8083,7 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm13 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[1,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[2,2,2,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,5,5,4]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm14 = xmm5[2,3,2,3]
@@ -8133,7 +8149,7 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vpunpckhdq {{.*#+}} xmm12 = xmm1[2],xmm12[2],xmm1[3],xmm12[3]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm13 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[1,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[2,2,2,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,5,5,4]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm2 = xmm11[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm2 = xmm13[0],xmm2[1],xmm13[2,3,4,5,6,7]
@@ -8160,7 +8176,7 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[1,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm12[2,2,2,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,4,5,5,4]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm13 = xmm0[2,3,2,3]
@@ -8222,7 +8238,7 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
 ; AVX1-ONLY-NEXT:    vpshufb %xmm15, %xmm8, %xmm6
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[1,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[2,2,2,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,5,4]
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm0 = xmm5[2,3,2,3]
 ; AVX1-ONLY-NEXT:    vpblendw {{.*#+}} xmm0 = xmm8[0],xmm0[1],xmm8[2,3,4,5,6,7]
@@ -8246,7 +8262,7 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
-; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
+; AVX1-ONLY-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[2,2,2,2,4,5,6,7]
 ; AVX1-ONLY-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,5,4]
 ; AVX1-ONLY-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vpshufd {{.*#+}} xmm6 = xmm5[2,3,2,3]
@@ -10668,1446 +10684,1458 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-SLOW-LABEL: store_i16_stride7_vf64:
 ; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    subq $2264, %rsp # imm = 0x8D8
+; AVX512F-SLOW-NEXT:    subq $2440, %rsp # imm = 0x988
 ; AVX512F-SLOW-NEXT:    vmovdqa 96(%rcx), %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdx), %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm12
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rsi), %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm1, %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm21
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm5, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm23
-; AVX512F-SLOW-NEXT:    vporq %ymm2, %ymm4, %ymm22
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm10, %ymm12, %ymm5
-; AVX512F-SLOW-NEXT:    vporq %ymm4, %ymm5, %ymm16
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%r9), %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm4, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%r8), %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm5, %ymm5
-; AVX512F-SLOW-NEXT:    vporq %ymm4, %ymm5, %ymm26
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm4, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm6
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm6, %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm30
-; AVX512F-SLOW-NEXT:    vpor %ymm4, %ymm5, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rsi), %ymm5
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm5, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm18
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm15
-; AVX512F-SLOW-NEXT:    vpshufb %ymm10, %ymm15, %ymm5
-; AVX512F-SLOW-NEXT:    vpor %ymm4, %ymm5, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm4, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm5, %ymm5
-; AVX512F-SLOW-NEXT:    vporq %ymm4, %ymm5, %ymm19
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm4, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm5, %ymm5
-; AVX512F-SLOW-NEXT:    vpor %ymm4, %ymm5, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm4, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %ymm10, %ymm5, %ymm5
-; AVX512F-SLOW-NEXT:    vpor %ymm4, %ymm5, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm11
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm11, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm9
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm9, %ymm1
-; AVX512F-SLOW-NEXT:    vpor %ymm3, %ymm1, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm8
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm8, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm6
-; AVX512F-SLOW-NEXT:    vpshufb %ymm10, %ymm6, %ymm2
-; AVX512F-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm3
-; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm3, %ymm10
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %ymm5
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm5, %ymm14
-; AVX512F-SLOW-NEXT:    vporq %ymm14, %ymm10, %ymm20
-; AVX512F-SLOW-NEXT:    vprold $16, %ymm5, %ymm10
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm3[1,2,2,3,5,6,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm14[0,1],ymm10[2],ymm14[3,4],ymm10[5],ymm14[6,7,8,9],ymm10[10],ymm14[11,12],ymm10[13],ymm14[14,15]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm5[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[2,2,2,3,6,6,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm3[3,3,3,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm13 = ymm14[0,1],ymm13[2],ymm14[3,4],ymm13[5],ymm14[6,7,8,9],ymm13[10],ymm14[11,12],ymm13[13],ymm14[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [2,1,3,2,10,10,10,11]
-; AVX512F-SLOW-NEXT:    vpermi2q %zmm13, %zmm10, %zmm1
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm14
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm14, %ymm10
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm24
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm23[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm13[2],ymm10[3,4],ymm13[5],ymm10[6,7,8,9],ymm13[10],ymm10[11,12],ymm13[13],ymm10[14,15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm10[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm14[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm23[3,3,3,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm13[0],ymm10[1],ymm13[2,3],ymm10[4],ymm13[5,6,7,8],ymm10[9],ymm13[10,11],ymm10[12],ymm13[13,14,15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm10[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdx), %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm7
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rsi), %ymm15
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm1, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm20
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm2, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm24
+; AVX512F-SLOW-NEXT:    vporq %ymm0, %ymm1, %ymm19
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm15, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm7, %ymm1
+; AVX512F-SLOW-NEXT:    vporq %ymm0, %ymm1, %ymm16
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%r9), %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm2, %ymm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm31
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%r8), %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm10, %ymm0, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm18
+; AVX512F-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rcx), %ymm6
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm6, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdx), %ymm5
+; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm5, %ymm2
+; AVX512F-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rsi), %ymm4
+; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm4, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm3
+; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm3, %ymm0
+; AVX512F-SLOW-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm0[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm12[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm13[0],ymm10[1],ymm13[2,3],ymm10[4],ymm13[5,6,7,8],ymm10[9],ymm13[10,11],ymm10[12],ymm13[13,14,15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm10[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm0[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm12[3,3,3,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm13[0,1,2],ymm10[3],ymm13[4,5],ymm10[6],ymm13[7,8,9,10],ymm10[11],ymm13[12,13],ymm10[14],ymm13[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm10[2,1,3,3]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
+; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%r9), %ymm10
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm10[0,1,2,3,5,5,7,6,8,9,10,11,13,13,15,14]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm13[3,3,3,3]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm23[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm14[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[0,0,0,0,4,4,4,4]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7,8,9],ymm14[10],ymm13[11,12],ymm14[13],ymm13[14,15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,1,3,2]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[1,1,1,1,5,5,5,5]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm12[2],ymm0[3,4],ymm12[5],ymm0[6,7,8,9],ymm12[10],ymm0[11,12],ymm12[13],ymm0[14,15]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm1, %ymm1
+; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm2, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm21
+; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm2
+; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm2, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm14
+; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm14, %ymm9
+; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm9, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm9
+; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm9, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm12
+; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm12, %ymm13
+; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm13, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm13
+; AVX512F-SLOW-NEXT:    vpshufb %ymm10, %ymm13, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm1, %ymm11
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm22
+; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm11, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vprold $16, %ymm1, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm13[1,2,2,3,5,6,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm11[0,1],ymm0[2],ymm11[3,4],ymm0[5],ymm11[6,7,8,9],ymm0[10],ymm11[11,12],ymm0[13],ymm11[14,15]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm11 = ymm1[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm11[2,2,2,3,6,6,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm13[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm11[0,1],ymm8[2],ymm11[3,4],ymm8[5],ymm11[6,7,8,9],ymm8[10],ymm11[11,12],ymm8[13],ymm11[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [2,1,3,2,10,10,10,11]
+; AVX512F-SLOW-NEXT:    vpermi2q %zmm8, %zmm0, %zmm10
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm10
+; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm10, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm17
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm24[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm8[2],ymm0[3,4],ymm8[5],ymm0[6,7,8,9],ymm8[10],ymm0[11,12],ymm8[13],ymm0[14,15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm22, %zmm12
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm16, %zmm0
-; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%r8), %ymm12
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm12[1,2,2,3,5,6,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm12[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[14,15],zero,zero,ymm12[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[16,17],zero,zero,ymm12[u,u],zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm21, %ymm0, %ymm12
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm10, %ymm14
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm12, %ymm14
-; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm13[2,1,3,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm12
-; AVX512F-SLOW-NEXT:    vprold $16, %ymm10, %ymm0
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm12, %ymm0
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm14[0,1,2,3],zmm0[0,1,2,3]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rax), %ymm13
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm13[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm10[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm24[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0],ymm0[1],ymm8[2,3],ymm0[4],ymm8[5,6,7,8],ymm0[9],ymm8[10,11],ymm0[12],ymm8[13,14,15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm15[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm7[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0],ymm0[1],ymm8[2,3],ymm0[4],ymm8[5,6,7,8],ymm0[9],ymm8[10,11],ymm0[12],ymm8[13,14,15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpandnq %ymm0, %ymm21, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm13, %ymm12
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm13, %ymm31
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm12, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm15[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm7[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0,1,2],ymm0[3],ymm8[4,5],ymm0[6],ymm8[7,8,9,10],ymm0[11],ymm8[12,13],ymm0[14],ymm8[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,3,3]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%r9), %ymm0
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm0[0,1,2,3,5,5,7,6,8,9,10,11,13,13,15,14]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[3,3,3,3]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm0[2,1,2,3,6,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm8[0,0,3,3,4,5,6,7,8,8,11,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,2]
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm24[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm10[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm11[0,0,0,0,4,4,4,4]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0,1],ymm11[2],ymm8[3,4],ymm11[5],ymm8[6,7,8,9],ymm11[10],ymm8[11,12],ymm11[13],ymm8[14,15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,3,2]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[1,1,1,1,5,5,5,5]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm15[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm11[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm11[0,1],ymm7[2],ymm11[3,4],ymm7[5],ymm11[6,7,8,9],ymm7[10],ymm11[11,12],ymm7[13],ymm11[14,15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vprold $16, %ymm0, %ymm11
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm10
+; AVX512F-SLOW-NEXT:    vpshufb %ymm10, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm19, %zmm8
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm16, %zmm7
+; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm7
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%r8), %ymm8
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm8[1,2,2,3,5,6,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm8[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[14,15],zero,zero,ymm8[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[16,17],zero,zero,ymm8[u,u],zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm19, %ymm7, %ymm8
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm0
+; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm7, %ymm7
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm15[2,1,3,3]
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm8
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm11[2,2,2,2]
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm7
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm7[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rax), %ymm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm0[0,1,2,2,4,5,6,6]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm0[2,3,3,3,6,7,7,7]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm0[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpandnq %ymm7, %ymm19, %ymm7
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
 ; AVX512F-SLOW-NEXT:    vpbroadcastd 72(%rax), %ymm0
-; AVX512F-SLOW-NEXT:    vpandnq %ymm0, %ymm16, %ymm12
+; AVX512F-SLOW-NEXT:    vpandn %ymm0, %ymm10, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqa 64(%rax), %ymm7
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm7, %ymm13
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm13
+; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm7, %ymm11
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 64(%r9), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%r8), %xmm12
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm17
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,0,1,1,8,9,10,11]
-; AVX512F-SLOW-NEXT:    vpermt2q %zmm26, %zmm22, %zmm14
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm23, %zmm14, %zmm13
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpbroadcastd 8(%rax), %ymm14
-; AVX512F-SLOW-NEXT:    vpandnq %ymm14, %ymm16, %ymm25
-; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %ymm13
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm13, %ymm14
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm13, %ymm27
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm25, %zmm13
-; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm0
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm29
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm28
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm14, %xmm14
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm16
-; AVX512F-SLOW-NEXT:    vpermt2q %zmm19, %zmm22, %zmm14
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm23, %zmm14, %zmm13
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm11[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[0,0,0,0,4,4,4,4]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm9[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7,8,9],ymm14[10],ymm13[11,12],ymm14[13],ymm13[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm8[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm13[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm6[1,1,1,1,5,5,5,5]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm13[0,1],ymm14[2],ymm13[3,4],ymm14[5],ymm13[6,7,8,9],ymm14[10],ymm13[11,12],ymm14[13],ymm13[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm5[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,0,0,0,4,4,4,4]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7,8,9,10],ymm3[11],ymm2[12,13],ymm3[14],ymm2[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <0,1,u,3,10,10,11,11>
-; AVX512F-SLOW-NEXT:    vpermi2q %zmm2, %zmm20, %zmm3
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rax), %ymm3
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm3[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpandnq %ymm2, %ymm21, %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm3, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm25
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm24, %ymm14
-; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm11, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm9[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm9[3,3,3,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm11[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%r8), %xmm15
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm11 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm16
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm11, %xmm11
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpbroadcastd 8(%rax), %ymm11
+; AVX512F-SLOW-NEXT:    vpandnq %ymm11, %ymm10, %ymm20
+; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %ymm11
+; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm11, %ymm10
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm30
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm20, %zmm10
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm2[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[0,0,0,0,4,4,4,4]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm14[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm11[0,1],ymm10[2],ymm11[3,4],ymm10[5],ymm11[6,7,8,9],ymm10[10],ymm11[11,12],ymm10[13],ymm11[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm9[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm11 = ymm12[1,1,1,1,5,5,5,5]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7,8,9],ymm11[10],ymm10[11,12],ymm11[13],ymm10[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm26
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rax), %ymm11
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm11[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpandnq %ymm10, %ymm19, %ymm10
+; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm11, %ymm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm27
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm13[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm8[3],ymm1[4,5],ymm8[6],ymm1[7,8,9,10],ymm8[11],ymm1[12,13],ymm8[14],ymm1[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm25
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm10
+; AVX512F-SLOW-NEXT:    vpshufb %ymm10, %ymm2, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm14[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm8[2],ymm1[3,4],ymm8[5],ymm1[6,7,8,9],ymm8[10],ymm1[11,12],ymm8[13],ymm1[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm14[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6,7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14,15]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm8[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6,7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm9[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm6[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm6[3,3,3,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm8[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm12[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm12[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm9[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7,8,9,10],ymm2[11],ymm1[12,13],ymm2[14],ymm1[15]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm9
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm9[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7,8,9,10],ymm2[11],ymm1[12,13],ymm2[14],ymm1[15]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm4[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm15[1,1,1,1,5,5,5,5]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm9[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm3[1,1,1,1,5,5,5,5]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm4[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm15[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm3[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm3[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm6[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm30[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm3, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm3, %ymm6
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm30[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm24
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vprold $16, %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm5[1,2,2,3,5,6,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm5[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm28
+; AVX512F-SLOW-NEXT:    vpshufb %ymm10, %ymm6, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm5[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm8
+; AVX512F-SLOW-NEXT:    vprold $16, %ymm31, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[1,2,2,3,5,6,6,7]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm0[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm8[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm9
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,0,0,0,4,4,4,4]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm5[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7,8,9,10],ymm3[11],ymm2[12,13],ymm3[14],ymm2[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [2,2,3,3,10,9,11,10]
-; AVX512F-SLOW-NEXT:    vpermt2q %zmm1, %zmm11, %zmm2
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm18[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm8[3],ymm2[4,5],ymm8[6],ymm2[7,8,9,10],ymm8[11],ymm2[12,13],ymm8[14],ymm2[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [2,2,3,3,10,9,11,10]
+; AVX512F-SLOW-NEXT:    vpermt2q %zmm1, %zmm12, %zmm2
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm7[0,1,1,3,4,5,5,7]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm7[0,1,2,2,4,5,6,6]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,3,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm13, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm30[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm7[0,1,2,2,4,5,6,6]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,3,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm1, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm11, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm5[3,3,3,3,7,7,7,7]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm6[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6,7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14,15]
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm15[3,3,3,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm9[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm4[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7,8,9,10],ymm2[11],ymm1[12,13],ymm2[14],ymm1[15]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm5[3,3,3,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm4[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7,8,9,10],ymm2[11],ymm1[12,13],ymm2[14],ymm1[15]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm18[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm9[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,3,6,6,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [4,5,4,5,4,5,6,7,16,17,16,17,16,17,17,19]
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%r9), %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%r8), %xmm5
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm6[0,1,3,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermt2d %zmm1, %zmm2, %zmm3
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm7[2,3,3,3,6,7,7,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,3,2]
-; AVX512F-SLOW-NEXT:    vpbroadcastd 96(%rax), %ymm1
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm1, %zmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rsi), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm3
-; AVX512F-SLOW-NEXT:    vprold $16, %xmm0, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm3[1,1,2,3]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0,1],xmm7[2],xmm8[3,4],xmm7[5],xmm8[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm7, %xmm30
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rcx), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdx), %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm0, %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm15, %xmm9
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm3[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm8[0],xmm7[1],xmm8[2,3],xmm7[4],xmm8[5,6],xmm7[7]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm6[0,1,2,3,4,5,7,6]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <16,18,19,19,19,19,u,u,0,1,0,1,2,3,2,3>
-; AVX512F-SLOW-NEXT:    vpermt2d %zmm3, %zmm4, %zmm0
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [4,5,4,5,4,5,6,7,16,17,16,17,16,17,17,19]
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%r9), %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%r8), %xmm6
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm8[0,1,3,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpermt2d %zmm4, %zmm13, %zmm1
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm7[2,3,3,3,6,7,7,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,1,3,2]
+; AVX512F-SLOW-NEXT:    vpbroadcastd 96(%rax), %ymm5
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm5, %zmm4
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rsi), %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdi), %xmm7
+; AVX512F-SLOW-NEXT:    vprold $16, %xmm1, %xmm9
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm7[1,1,2,3]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm10[0,1],xmm9[2],xmm10[3,4],xmm9[5],xmm10[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm23
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rcx), %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa 96(%rdx), %xmm7
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm1, %xmm9
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm7[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm4 = xmm10[0],xmm9[1],xmm10[2,3],xmm9[4],xmm10[5,6],xmm9[7]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,4,5,7,6]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <16,18,19,19,19,19,u,u,0,1,0,1,2,3,2,3>
+; AVX512F-SLOW-NEXT:    vpermt2d %zmm3, %zmm0, %zmm1
 ; AVX512F-SLOW-NEXT:    vpbroadcastd 100(%rax), %ymm3
-; AVX512F-SLOW-NEXT:    vpbroadcastd 104(%rax), %ymm4
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm3
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm0
+; AVX512F-SLOW-NEXT:    vpbroadcastd 104(%rax), %ymm6
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm3, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm31, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdi), %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa 64(%rsi), %xmm3
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm23
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm21
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm8, %xmm19
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
 ; AVX512F-SLOW-NEXT:    vprold $16, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm3[2],xmm0[3,4],xmm3[5],xmm0[6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm3[2],xmm1[3,4],xmm3[5],xmm1[6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rcx), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm3
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm15 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm15, %xmm18
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1],xmm3[2,3],xmm0[4],xmm3[5,6],xmm0[7]
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rcx), %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa 64(%rdx), %xmm6
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm6[0],xmm1[1],xmm6[2,3],xmm1[4],xmm6[5,6],xmm1[7]
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm0
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,4,5,7,6]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,7,6]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <0,1,0,1,0,1,1,3,16,18,19,19,19,19,u,u>
-; AVX512F-SLOW-NEXT:    vpermt2d %zmm3, %zmm4, %zmm0
-; AVX512F-SLOW-NEXT:    vpbroadcastd 64(%rax), %ymm3
-; AVX512F-SLOW-NEXT:    vpbroadcastd 68(%rax), %ymm5
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm5
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <0,1,0,1,0,1,1,3,16,18,19,19,19,19,u,u>
+; AVX512F-SLOW-NEXT:    vpermt2d %zmm1, %zmm6, %zmm0
+; AVX512F-SLOW-NEXT:    vpbroadcastd 64(%rax), %ymm1
+; AVX512F-SLOW-NEXT:    vpbroadcastd 68(%rax), %ymm7
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm1, %zmm4
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm5
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm22
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; AVX512F-SLOW-NEXT:    vprold $16, %xmm5, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm7
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm14, %xmm22
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
+; AVX512F-SLOW-NEXT:    vprold $16, %xmm7, %xmm7
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2],xmm0[3,4],xmm5[5],xmm0[6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm7[2],xmm0[3,4],xmm7[5],xmm0[6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm5
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm19
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm5[0],xmm0[1],xmm5[2,3],xmm0[4],xmm5[5,6],xmm0[7]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm5
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm0[0,1,2,3,4,5,7,6]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm7
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm20
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm29
+; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0],xmm0[1],xmm7[2,3],xmm0[4],xmm7[5,6],xmm0[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm24
+; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm7
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm15 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm0[0,1,2,3,4,5,7,6]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermt2d %zmm5, %zmm4, %zmm0
-; AVX512F-SLOW-NEXT:    vpbroadcastd (%rax), %ymm4
-; AVX512F-SLOW-NEXT:    vpbroadcastd 4(%rax), %ymm5
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm4
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm7[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpermt2d %zmm7, %zmm6, %zmm0
+; AVX512F-SLOW-NEXT:    vpbroadcastd (%rax), %ymm6
+; AVX512F-SLOW-NEXT:    vpbroadcastd 4(%rax), %ymm7
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm2
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm1, %zmm2
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm9[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm4[1,1,1,1,5,5,5,5]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7,8,9],ymm3[10],ymm0[11,12],ymm3[13],ymm0[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm20
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm7[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm4
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm21[1,1,1,1,5,5,5,5]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm21
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm9[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm4[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm4, %ymm9
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6,7,8],ymm0[9],ymm3[10,11],ymm0[12],ymm3[13,14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm16
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vprold $16, %ymm5, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm6[1,2,2,3,5,6,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1],ymm0[2],ymm3[3,4],ymm0[5],ymm3[6,7,8,9],ymm0[10],ymm3[11,12],ymm0[13],ymm3[14,15]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm5[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,0,0,0,4,4,4,4]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm6[0,0,2,1,4,4,6,5]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm3[0,1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8,9,10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm10[2,1,2,3,6,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[0,0,3,3,4,5,6,7,8,8,11,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpermt2q %zmm0, %zmm11, %zmm4
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm27[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm4[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm4, %ymm10
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm17
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vprold $16, %ymm4, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm7[1,2,2,3,5,6,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm7[0,0,2,1,4,4,6,5]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm6[3],ymm1[4,5],ymm6[6],ymm1[7,8,9,10],ymm6[11],ymm1[12,13],ymm6[14],ymm1[15]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,2,3,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm3[0,0,2,1]
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpermt2q %zmm0, %zmm12, %zmm1
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm30[0,1,1,3,4,5,5,7]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm27[0,1,2,2,4,5,6,6]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm30[0,1,2,2,4,5,6,6]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,3,3]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm13, %zmm0
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm11, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm3[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm8[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm4[0,1],ymm0[2],ymm4[3,4],ymm0[5],ymm4[6,7,8,9],ymm0[10],ymm4[11,12],ymm0[13],ymm4[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm17
-; AVX512F-SLOW-NEXT:    vpshufb %ymm14, %ymm3, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm8[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7,8,9],ymm4[10],ymm0[11,12],ymm4[13],ymm0[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm26
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm8[3,3,3,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm3[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2,3],ymm4[4],ymm0[5,6,7,8],ymm4[9],ymm0[10,11],ymm4[12],ymm0[13,14,15]
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm6[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm18
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm6[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm16
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm6[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm3[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6,7,8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14,15]
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm9[3,3,3,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm7[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,2,6,6,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3],ymm0[4,5],ymm4[6],ymm0[7,8,9,10],ymm4[11],ymm0[12,13],ymm4[14],ymm0[15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm10[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm9[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,2,6,6,6,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8,9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm6[3,3,3,3,7,7,7,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm5[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,3,6,6,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm4[0,1],ymm0[2],ymm4[3,4],ymm0[5],ymm4[6,7,8,9],ymm0[10],ymm4[11,12],ymm0[13],ymm4[14,15]
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %xmm0
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm10[0,1,3,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermt2d %zmm11, %zmm2, %zmm5
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm27[2,3,3,3,6,7,7,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm7[3,3,3,3,7,7,7,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm4[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,2,3,6,6,6,7]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm7 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %xmm1
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm6[0,1,3,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpermt2d %zmm11, %zmm13, %zmm7
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm30[2,3,3,3,6,7,7,7]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,1,3,2]
 ; AVX512F-SLOW-NEXT:    vpbroadcastd 32(%rax), %ymm11
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm2, %zmm29
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm1, %zmm29
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm2
-; AVX512F-SLOW-NEXT:    vprold $16, %xmm2, %xmm5
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm1[1,1,2,3]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm3 = xmm13[0,1],xmm5[2],xmm13[3,4],xmm5[5],xmm13[6,7]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm15, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm14, %xmm3
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm2, %zmm30
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm7, %zmm5, %zmm30
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm5
+; AVX512F-SLOW-NEXT:    vprold $16, %xmm5, %xmm7
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm2[1,1,2,3]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm13[0,1],xmm7[2],xmm13[3,4],xmm7[5],xmm13[6,7]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm15, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm30 = ymm31[0,1,2,2,4,5,6,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm31[2,3,3,3,6,7,7,7]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm15
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm15[0,2,3,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm21 = ymm4[0,0,2,1]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm2
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm2[2,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,5,4]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm4[0,0,1,3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm2
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm2[0,2,3,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm31 = ymm4[0,0,2,1]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm2
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm2[2,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,5,4]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm19 = ymm4[0,0,1,3]
-; AVX512F-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm23 = mem[2,1,3,2]
-; AVX512F-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm22 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm22 = mem[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermpd $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm2 = mem[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vmovups %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpermpd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpermpd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vmovups %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpermpd $246, (%rsp), %ymm2 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm2 = mem[2,1,3,3]
-; AVX512F-SLOW-NEXT:    vmovups %ymm2, (%rsp) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,7,6]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <16,18,19,19,19,19,u,u,0,1,0,1,2,3,2,3>
-; AVX512F-SLOW-NEXT:    vpermt2d %zmm10, %zmm2, %zmm0
-; AVX512F-SLOW-NEXT:    vpbroadcastd 36(%rax), %ymm10
-; AVX512F-SLOW-NEXT:    vpbroadcastd 40(%rax), %ymm14
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm10, %zmm10
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm10
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm14
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm3
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm14, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm13, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm8
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm8[2,1,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,4,5,5,4]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm19 = ymm13[0,0,1,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm2[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm0
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm0[0,2,3,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm5[0,0,2,1]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm14
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm14[2,1,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,5,4]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm7[0,0,1,3]
+; AVX512F-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm22 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm22 = mem[2,1,3,2]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm26 = ymm26[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm25[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpermpd $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm0 = mem[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpermpd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm0 = mem[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpermpd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm0 = mem[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpermpd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm0 = mem[2,1,3,3]
+; AVX512F-SLOW-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,7,6]
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <16,18,19,19,19,19,u,u,0,1,0,1,2,3,2,3>
+; AVX512F-SLOW-NEXT:    vpermt2d %zmm6, %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vpbroadcastd 36(%rax), %ymm6
+; AVX512F-SLOW-NEXT:    vpbroadcastd 40(%rax), %ymm13
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm6, %zmm13
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm31, %zmm13
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm6
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm6[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm0
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm0[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm13[0],xmm15[1],xmm13[2,3],xmm15[4],xmm13[5,6],xmm15[7]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm0[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm15[0],xmm1[1],xmm15[2,3],xmm1[4],xmm15[5,6],xmm1[7]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
 ; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm25[0,1,2,2,4,5,6,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm25[2,3,3,3,6,7,7,7]
-; AVX512F-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm28 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm28 = mem[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm27 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm27 = mem[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm25 = mem[2,1,3,2]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm1[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm5 = mem[0,0,2,1]
-; AVX512F-SLOW-NEXT:    vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # xmm15 = mem[0,1,3,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm4 = mem[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm7[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm6 = mem[0,0,2,1]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm27[0,1,2,2,4,5,6,6]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm27[2,3,3,3,6,7,7,7]
+; AVX512F-SLOW-NEXT:    vpermpd $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm0 = mem[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm31 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm31 = mem[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[2,1,3,2]
+; AVX512F-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm27 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm27 = mem[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm3[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm7 = mem[0,0,2,1]
 ; AVX512F-SLOW-NEXT:    vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # xmm14 = mem[0,1,3,2,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm3 = mem[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[2,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm17[2,1,3,2]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm26[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm5 = mem[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm4[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm4 = mem[0,0,2,1]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm0
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm0[0,1,3,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm24[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm21 = ymm21[2,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm17[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm18[2,1,3,2]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[2,2,2,3]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm26, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm24, %zmm0
 ; AVX512F-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm1 = mem[2,1,3,2]
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21, %zmm1 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm18 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm7, %zmm18
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm18 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm31, %zmm31 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm2[0,1,2,3],zmm1[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm1 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm29 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm10[0,1,2,3],zmm29[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm10 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm19 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm31, %zmm7, %zmm19
-; AVX512F-SLOW-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm19 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm29, %zmm19
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm2, %zmm19
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm1 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm20 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm29, %zmm20
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm10, %zmm2, %zmm20
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm1, %zmm1
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm23, %zmm21
-; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm21
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm1, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm26, %zmm2, %zmm2
+; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm31 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm31 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm7 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm23 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm25, %zmm0, %zmm10
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm22[0,1,2,3],zmm10[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm10 # 64-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
 ; AVX512F-SLOW-NEXT:    vpternlogd $226, 124(%r8){1to8}, %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm1[0,1,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm30[2,1,3,3]
-; AVX512F-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm23 = mem[2,1,3,2]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm1, %zmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm23, %zmm1
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm9[2,1,3,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm13[2,1,3,2]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm0 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm3[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm1 = mem[2,1,3,3]
+; AVX512F-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm22 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm22 = mem[2,1,3,2]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm1, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm22, %zmm1
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm12[2,1,3,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm15[2,1,3,2]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm0 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm7
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm14[0,1,1,3]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm8, %zmm5
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm15[0,1,1,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm8, %zmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm8, %zmm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm11, %zmm4
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm14[0,1,1,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm5, %zmm3
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm7, %zmm8, %zmm5
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm11, %zmm4
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,1,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm6, %zmm3
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm8, %zmm3
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm30 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm4, %zmm30
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm4, %zmm23
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm27, %zmm28, %zmm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm25, %zmm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm4, %zmm3
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm20, %zmm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm17, %zmm5
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm4, %zmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0]
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm2, %zmm24
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm2, %zmm20
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm4, %zmm25
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm4, %zmm26
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, (%rsp), %zmm3, %zmm3 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm2, %zmm26, %zmm3
-; AVX512F-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # xmm4 = mem[0,1,3,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,1,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
-; AVX512F-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm4 = mem[2,1,3,3]
-; AVX512F-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm5 = mem[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm31, %zmm3, %zmm3
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm27, %zmm28, %zmm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm5, %zmm4
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm21, %zmm3
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm18, %zmm6
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm5, %zmm6
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0]
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm3, %zmm22
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm6, %zmm3, %zmm27
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm3, %zmm24, %zmm4
+; AVX512F-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm3 = mem[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # xmm5 = mem[0,1,3,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,1,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm3
+; AVX512F-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm5 = mem[2,1,3,3]
+; AVX512F-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm6 = mem[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm5
+; AVX512F-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm21 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm21 = mem[0,0,2,1]
+; AVX512F-SLOW-NEXT:    vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # xmm7 = mem[2,1,2,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,5,4]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,1,3]
+; AVX512F-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm8 = mem[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # xmm9 = mem[0,2,3,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,2,1]
+; AVX512F-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm12 = mem[0,2,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # xmm14 = mem[0,1,3,2,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm15 = mem[2,1,3,3]
+; AVX512F-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm16 = mem[0,0,1,1]
 ; AVX512F-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm17 = mem[0,0,2,1]
-; AVX512F-SLOW-NEXT:    vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # xmm6 = mem[2,1,2,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,5,4]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,1,3]
-; AVX512F-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm25 = mem[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # xmm8 = mem[0,2,3,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
-; AVX512F-SLOW-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm9 = mem[0,2,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw $180, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # xmm12 = mem[0,1,3,2,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm13 = mem[2,1,3,3]
-; AVX512F-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm14 = mem[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm15 = mem[0,0,2,1]
 ; AVX512F-SLOW-NEXT:    vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # xmm11 = mem[2,1,2,3,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,5,4]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,1,3]
-; AVX512F-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm16 = mem[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # xmm5 = mem[0,2,3,3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm26, %zmm4
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm12[0,1,1,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm9, %zmm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm13, %zmm9
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm26, %zmm9
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm2, %zmm12
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm9, %zmm2, %zmm29
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm17, %zmm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm25, %zmm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm6, %zmm4
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm15, %zmm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm16, %zmm5
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm6, %zmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0]
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm2, %zmm6
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm2, %zmm10
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm21, %zmm7
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm0
+; AVX512F-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm18 = mem[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # xmm6 = mem[0,2,3,3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,2,1]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm24, %zmm5
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm14[0,1,1,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm12, %zmm3
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm15, %zmm12
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm24, %zmm12
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535]
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm3, %zmm14
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm12, %zmm3, %zmm30
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm21, %zmm3
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm5
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm29, %zmm5
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm17, %zmm3
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm18, %zmm6
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm3, %zmm29, %zmm6
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0]
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm5, %zmm3, %zmm7
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm6, %zmm3, %zmm13
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm10
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm0
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm7, 320(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm10, 256(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm29, 192(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm20, 128(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm19, 64(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm23, (%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm30, 448(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm6, 704(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm12, 640(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm24, 576(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm18, 512(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm10, 320(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm13, 256(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm30, 192(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm27, 128(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm20, 64(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm26, (%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm25, 448(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm7, 704(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm14, 640(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm22, 576(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm19, 512(%rax)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, 384(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm31, 768(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm23, 768(%rax)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm1, 832(%rax)
-; AVX512F-SLOW-NEXT:    addq $2264, %rsp # imm = 0x8D8
+; AVX512F-SLOW-NEXT:    addq $2440, %rsp # imm = 0x988
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
 ; AVX512F-FAST-LABEL: store_i16_stride7_vf64:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    subq $2232, %rsp # imm = 0x8B8
+; AVX512F-FAST-NEXT:    subq $2200, %rsp # imm = 0x898
 ; AVX512F-FAST-NEXT:    vmovdqa 96(%rsi), %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %ymm6
 ; AVX512F-FAST-NEXT:    vmovdqa 96(%rcx), %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rdx), %ymm10
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm22
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm10, %ymm3
-; AVX512F-FAST-NEXT:    vporq %ymm2, %ymm3, %ymm17
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
-; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm4, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm4, %ymm19
-; AVX512F-FAST-NEXT:    vporq %ymm2, %ymm3, %ymm23
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
-; AVX512F-FAST-NEXT:    vmovdqa 64(%r9), %ymm2
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa %ymm3, %ymm5
-; AVX512F-FAST-NEXT:    vmovdqa 64(%r8), %ymm3
-; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vporq %ymm2, %ymm3, %ymm26
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rcx), %ymm2
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdx), %ymm3
-; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rsi), %ymm2
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %ymm11
-; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm11, %ymm3
-; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm2
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa %ymm5, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm3
-; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vporq %ymm2, %ymm3, %ymm29
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm2
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm3
-; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm2
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm3
-; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 96(%rdx), %ymm15
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm25
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm15, %ymm3
+; AVX512F-FAST-NEXT:    vporq %ymm0, %ymm3, %ymm17
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm20
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm6, %ymm5
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm6, %ymm23
+; AVX512F-FAST-NEXT:    vporq %ymm4, %ymm5, %ymm16
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
+; AVX512F-FAST-NEXT:    vmovdqa 64(%r9), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa %ymm5, %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa 64(%r8), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm5
+; AVX512F-FAST-NEXT:    vporq %ymm4, %ymm5, %ymm24
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rcx), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rdx), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm5
+; AVX512F-FAST-NEXT:    vpor %ymm4, %ymm5, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rsi), %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm30
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %ymm14
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm14, %ymm5
+; AVX512F-FAST-NEXT:    vpor %ymm4, %ymm5, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa %ymm6, %ymm10
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm5
+; AVX512F-FAST-NEXT:    vporq %ymm4, %ymm5, %ymm31
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm5
+; AVX512F-FAST-NEXT:    vpor %ymm4, %ymm5, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm1, %ymm5
+; AVX512F-FAST-NEXT:    vpor %ymm4, %ymm5, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm13
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm13, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm9
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm9, %ymm2
-; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm2, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm6
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm6, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm5
-; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm5, %ymm15
-; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm15, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm12
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm12, %ymm4
+; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm4, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm11
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm11, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm7
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm0
+; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm15
-; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm15, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm2, %ymm14
-; AVX512F-FAST-NEXT:    vporq %ymm14, %ymm0, %ymm21
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm15[3,3,3,3,7,7,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm14[2],ymm0[3,4],ymm14[5],ymm0[6,7,8,9],ymm14[10],ymm0[11,12],ymm14[13],ymm0[14,15]
-; AVX512F-FAST-NEXT:    vprold $16, %ymm2, %ymm14
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm15[1,2,2,3,5,6,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0,1],ymm14[2],ymm8[3,4],ymm14[5],ymm8[6,7,8,9],ymm14[10],ymm8[11,12],ymm14[13],ymm8[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [2,1,3,2,10,10,10,11]
-; AVX512F-FAST-NEXT:    vpermi2q %zmm0, %zmm8, %zmm12
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm28
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm19[2,2,2,2,6,6,6,6]
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm6
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm6, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %ymm5
+; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm5, %ymm9
+; AVX512F-FAST-NEXT:    vporq %ymm9, %ymm0, %ymm22
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm6[3,3,3,3,7,7,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm9[2],ymm0[3,4],ymm9[5],ymm0[6,7,8,9],ymm9[10],ymm0[11,12],ymm9[13],ymm0[14,15]
+; AVX512F-FAST-NEXT:    vprold $16, %ymm5, %ymm9
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm6[1,2,2,3,5,6,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7,8,9],ymm9[10],ymm8[11,12],ymm9[13],ymm8[14,15]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [2,1,3,2,10,10,10,11]
+; AVX512F-FAST-NEXT:    vpermi2q %zmm0, %zmm8, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u>
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm20, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm29
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm23[2,2,2,2,6,6,6,6]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0],ymm0[1],ymm8[2,3],ymm0[4],ymm8[5,6,7,8],ymm0[9],ymm8[10,11],ymm0[12],ymm8[13,14,15]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29>
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm7, %ymm30
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm19[3,3,3,3,7,7,7,7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29>
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm20
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm23[3,3,3,3,7,7,7,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0,1,2],ymm0[3],ymm8[4,5],ymm0[6],ymm8[7,8,9,10],ymm0[11],ymm8[12,13],ymm0[14],ymm8[15]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27>
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm22, %ymm14
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm14, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm18
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm10[2,2,2,2,6,6,6,6]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27>
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm25, %ymm4
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm21
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm15[2,2,2,2,6,6,6,6]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm8[2],ymm0[3,4],ymm8[5],ymm0[6,7,8,9],ymm8[10],ymm0[11,12],ymm8[13],ymm0[14,15]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm14, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm7, %ymm27
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm10[3,3,3,3,7,7,7,7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm25
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm15[3,3,3,3,7,7,7,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0],ymm0[1],ymm8[2,3],ymm0[4],ymm8[5,6,7,8],ymm0[9],ymm8[10,11],ymm0[12],ymm8[13,14,15]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm14[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm10[0,1,1,3,4,5,5,7]
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm4[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm15[0,1,1,3,4,5,5,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0,1],ymm0[2],ymm8[3,4],ymm0[5],ymm8[6,7,8,9],ymm0[10],ymm8[11,12],ymm0[13],ymm8[14,15]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,3,2]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23>
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm8
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm16
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm10 = ymm19[1,1,1,1,5,5,5,5]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0,1],ymm10[2],ymm8[3,4],ymm10[5],ymm8[6,7,8,9],ymm10[10],ymm8[11,12],ymm10[13],ymm8[14,15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23>
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm18
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm9 = ymm23[1,1,1,1,5,5,5,5]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0,1],ymm9[2],ymm8[3,4],ymm9[5],ymm8[6,7,8,9],ymm9[10],ymm8[11,12],ymm9[13],ymm8[14,15]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm17, %zmm0
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm23, %zmm8
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm16, %zmm8
 ; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm8
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <5,u,u,u,6,u,u,6>
-; AVX512F-FAST-NEXT:    vmovdqa 96(%r8), %ymm10
-; AVX512F-FAST-NEXT:    vpermd %ymm10, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa 96(%r8), %ymm9
+; AVX512F-FAST-NEXT:    vpermd %ymm9, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,4,u,u,u,5,u,u>
-; AVX512F-FAST-NEXT:    vpermd %ymm10, %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm10[14,15],zero,zero,ymm10[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm10[16,17],zero,zero,ymm10[u,u],zero,zero
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm17, %ymm8, %ymm10
+; AVX512F-FAST-NEXT:    vpermd %ymm9, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm9[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm9[14,15],zero,zero,ymm9[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm9[16,17],zero,zero,ymm9[u,u],zero,zero
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm16, %ymm8, %ymm9
 ; AVX512F-FAST-NEXT:    vmovdqa 96(%r9), %ymm1
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm14
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm10, %ymm14
+; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm1, %ymm15
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm15
 ; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm8, %ymm8
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm0
 ; AVX512F-FAST-NEXT:    vprold $16, %ymm1, %ymm8
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,2]
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm8
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm14[0,1,2,3],zmm8[0,1,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm15[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-FAST-NEXT:    vmovdqa64 64(%rax), %zmm22
+; AVX512F-FAST-NEXT:    vmovdqa64 64(%rax), %zmm23
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <u,5,u,u,u,6,u,u,30,u,u,u,31,u,u,31>
 ; AVX512F-FAST-NEXT:    vmovdqa 96(%rax), %ymm0
-; AVX512F-FAST-NEXT:    vpermi2d %zmm22, %zmm0, %zmm1
+; AVX512F-FAST-NEXT:    vpermi2d %zmm23, %zmm0, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [0,1,4,5,4,5,5,7]
-; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm23, %ymm8
-; AVX512F-FAST-NEXT:    vpandnq %ymm8, %ymm17, %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [0,1,4,5,4,5,5,7]
+; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm17, %ymm8
+; AVX512F-FAST-NEXT:    vpandnq %ymm8, %ymm16, %ymm8
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
 ; AVX512F-FAST-NEXT:    vpbroadcastd 72(%rax), %ymm0
-; AVX512F-FAST-NEXT:    vpandn %ymm0, %ymm12, %ymm8
-; AVX512F-FAST-NEXT:    vmovdqa 64(%rax), %ymm7
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm7, %ymm10
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm19
-; AVX512F-FAST-NEXT:    vmovdqa 64(%r9), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa 64(%r8), %xmm10
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm0, %xmm25
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [0,0,1,1,8,9,10,11]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm26, %zmm24, %zmm14
+; AVX512F-FAST-NEXT:    vpandn %ymm0, %ymm1, %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa 64(%rax), %ymm10
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm10, %ymm9
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm27
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm15
+; AVX512F-FAST-NEXT:    vmovdqa 64(%r9), %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa 64(%r8), %xmm9
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm9[4],xmm2[4],xmm9[5],xmm2[5],xmm9[6],xmm2[6],xmm9[7],xmm2[7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm28
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [0,0,1,1,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpermt2q %zmm15, %zmm24, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm26 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm26, %zmm14, %zmm19
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpbroadcastd 8(%rax), %ymm14
-; AVX512F-FAST-NEXT:    vpandn %ymm14, %ymm12, %ymm14
-; AVX512F-FAST-NEXT:    vmovdqa (%rax), %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm12
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm20
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm14, %zmm14
-; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm4
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm19
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm12, %xmm12
-; AVX512F-FAST-NEXT:    vpermt2q %zmm29, %zmm24, %zmm12
-; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm26, %zmm12, %zmm14
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm12 = ymm13[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm9[0,1,1,3,4,5,5,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm14[0,1],ymm12[2],ymm14[3,4],ymm12[5],ymm14[6,7,8,9],ymm12[10],ymm14[11,12],ymm12[13],ymm14[14,15]
+; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm26, %zmm0, %zmm27
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpbroadcastd 8(%rax), %ymm0
+; AVX512F-FAST-NEXT:    vpandn %ymm0, %ymm1, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa (%rax), %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm15
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm0, %zmm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm31, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm8
+; AVX512F-FAST-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm15 = xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm15, %xmm15
+; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm24, %zmm15
+; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm26, %zmm15, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm13[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm12[0,1,1,3,4,5,5,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm15[0,1],ymm0[2],ymm15[3,4],ymm0[5],ymm15[6,7,8,9],ymm0[10],ymm15[11,12],ymm0[13],ymm15[14,15]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm16, %ymm4
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm6, %ymm12
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm14 = ymm5[1,1,1,1,5,5,5,5]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm12[0,1],ymm14[2],ymm12[3,4],ymm14[5],ymm12[6,7,8,9],ymm14[10],ymm12[11,12],ymm14[13],ymm12[14,15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm18, %ymm4
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm11, %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm15 = ymm7[1,1,1,1,5,5,5,5]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm15[2],ymm0[3,4],ymm15[5],ymm0[6,7,8,9],ymm15[10],ymm0[11,12],ymm15[13],ymm0[14,15]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21>
-; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm15[0,0,2,1,4,4,6,5]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm12[3],ymm2[4,5],ymm12[6],ymm2[7,8,9,10],ymm12[11],ymm2[12,13],ymm12[14],ymm2[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = <0,1,u,3,10,10,11,11>
-; AVX512F-FAST-NEXT:    vpermi2q %zmm2, %zmm21, %zmm12
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rax), %ymm2
-; AVX512F-FAST-NEXT:    vpermd %ymm2, %ymm23, %ymm12
-; AVX512F-FAST-NEXT:    vpandnq %ymm12, %ymm17, %ymm12
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm2, %ymm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm1, %zmm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm5, %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm6[0,0,2,1,4,4,6,5]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7,8,9,10],ymm2[11],ymm0[12,13],ymm2[14],ymm0[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,1,u,3,10,10,11,11>
+; AVX512F-FAST-NEXT:    vpermi2q %zmm0, %zmm22, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm28, %ymm14
-; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm6, %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm5[2,2,2,2,6,6,6,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm12[0],ymm1[1],ymm12[2,3],ymm1[4],ymm12[5,6,7,8],ymm1[9],ymm12[10,11],ymm1[12],ymm12[13,14,15]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm30, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm6, %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm5[3,3,3,3,7,7,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1,2],ymm1[3],ymm3[4,5],ymm1[6],ymm3[7,8,9,10],ymm1[11],ymm3[12,13],ymm1[14],ymm3[15]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm18, %ymm5
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm13, %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm9[2,2,2,2,6,6,6,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7,8,9],ymm3[10],ymm1[11,12],ymm3[13],ymm1[14,15]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm27, %ymm15
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm13, %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm9[3,3,3,3,7,7,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5,6,7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13,14,15]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 (%rax), %zmm18
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <u,21,u,u,u,22,u,u,14,u,u,u,15,u,u,15>
-; AVX512F-FAST-NEXT:    vpermi2d %zmm2, %zmm18, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rax), %ymm0
+; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm17, %ymm2
+; AVX512F-FAST-NEXT:    vpandnq %ymm2, %ymm16, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm9, %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm11[1,1,1,1,5,5,5,5]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm9, %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm11[2,2,2,2,6,6,6,6]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm29, %ymm5
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm11, %ymm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm7[2,2,2,2,6,6,6,6]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm3[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm4[0,1,1,3,4,5,5,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm20, %ymm6
+; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm11, %ymm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm7[3,3,3,3,7,7,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8,9,10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm3, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa %ymm3, %ymm12
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm4[2,2,2,2,6,6,6,6]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm4, %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm21, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm13, %ymm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm12[2,2,2,2,6,6,6,6]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vprold $16, %ymm3, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm5[1,2,2,3,5,6,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
-; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm3, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa %ymm3, %ymm4
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm5[0,0,2,1,4,4,6,5]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7,8,9,10],ymm3[11],ymm2[12,13],ymm3[14],ymm2[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [2,2,3,3,10,9,11,10]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm1, %zmm24, %zmm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm22, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,u,4,u,u,u,5,u,u,13,u,u,u,14,u,u>
-; AVX512F-FAST-NEXT:    vpermd %zmm1, %zmm3, %zmm1
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm12, %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm6[3,3,3,3,7,7,7,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm25, %ymm15
+; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm13, %ymm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm12[3,3,3,3,7,7,7,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13,14,15]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm11[3,3,3,3,7,7,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8,9,10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31>
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm23, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa64 (%rax), %zmm21
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <u,21,u,u,u,22,u,u,14,u,u,u,15,u,u,15>
+; AVX512F-FAST-NEXT:    vpermi2d %zmm0, %zmm21, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm2, %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm14[1,1,1,1,5,5,5,5]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm30, %ymm4
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm14[2,2,2,2,6,6,6,6]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm5[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm2[0,1,1,3,4,5,5,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa %ymm5, %ymm11
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm2[2,2,2,2,6,6,6,6]
+; AVX512F-FAST-NEXT:    vmovdqa %ymm2, %ymm7
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vprold $16, %ymm2, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm5[1,2,2,3,5,6,6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm2, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa %ymm2, %ymm3
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm5[0,0,2,1,4,4,6,5]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3],ymm1[4,5],ymm2[6],ymm1[7,8,9,10],ymm2[11],ymm1[12,13],ymm2[14],ymm1[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [2,2,3,3,10,9,11,10]
+; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm12, %zmm2
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm23, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <u,u,4,u,u,u,5,u,u,13,u,u,u,14,u,u>
+; AVX512F-FAST-NEXT:    vpermd %zmm0, %zmm1, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm1, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm11, %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm7[3,3,3,3,7,7,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6,7,8],ymm0[9],ymm2[10,11],ymm0[12],ymm2[13,14,15]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm4, %ymm0
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm14[3,3,3,3,7,7,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2],ymm0[3],ymm2[4,5],ymm0[6],ymm2[7,8,9,10],ymm0[11],ymm2[12,13],ymm0[14],ymm2[15]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm22 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31>
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm22, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm5[3,3,3,3,7,7,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,2,2,3,8,8,8,9]
-; AVX512F-FAST-NEXT:    vmovdqa 96(%r9), %xmm4
-; AVX512F-FAST-NEXT:    vmovdqa 96(%r8), %xmm5
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3,4],ymm2[5],ymm0[6,7,8,9],ymm2[10],ymm0[11,12],ymm2[13],ymm0[14,15]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [2,2,2,3,8,8,8,9]
+; AVX512F-FAST-NEXT:    vmovdqa 96(%r9), %xmm5
+; AVX512F-FAST-NEXT:    vmovdqa 96(%r8), %xmm6
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm6, %xmm1
-; AVX512F-FAST-NEXT:    vpermt2q %zmm1, %zmm0, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [6,7,3,3,7,7,6,7]
-; AVX512F-FAST-NEXT:    vpermd %ymm7, %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vpbroadcastd 96(%rax), %ymm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm7, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa %xmm3, %xmm14
+; AVX512F-FAST-NEXT:    vpermt2q %zmm2, %zmm4, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [6,7,3,3,7,7,6,7]
+; AVX512F-FAST-NEXT:    vpermd %ymm10, %ymm2, %ymm3
+; AVX512F-FAST-NEXT:    vpbroadcastd 96(%rax), %ymm4
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm3
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 96(%rsi), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vprold $16, %xmm0, %xmm7
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm2[1,1,2,3]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm9[0,1],xmm7[2],xmm9[3,4],xmm7[5],xmm9[6,7]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm21
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm15 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm15, %xmm29
+; AVX512F-FAST-NEXT:    vmovdqa 96(%rdi), %xmm4
+; AVX512F-FAST-NEXT:    vprold $16, %xmm0, %xmm10
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm4[1,1,2,3]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm11[0,1],xmm10[2],xmm11[3,4],xmm10[5],xmm11[6,7]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm30
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm0, %xmm23
 ; AVX512F-FAST-NEXT:    vmovdqa 96(%rcx), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa 96(%rdx), %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9>
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm0, %xmm7
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm9 = xmm2[1,1,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm9[0],xmm7[1],xmm9[2,3],xmm7[4],xmm9[5,6],xmm7[7]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm30
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; AVX512F-FAST-NEXT:    vmovdqa 96(%rdx), %xmm10
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9>
+; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa %xmm15, %xmm8
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm11 = xmm10[1,1,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm11[0],xmm4[1],xmm11[2,3],xmm4[4],xmm11[5,6],xmm4[7]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm6, %xmm4
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,1,3,8,8,9,9]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm4
+; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm7, %xmm5
+; AVX512F-FAST-NEXT:    vmovdqa %xmm15, %xmm7
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,1,1,3,8,8,9,9]
+; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm3, %zmm5
 ; AVX512F-FAST-NEXT:    vpbroadcastd 100(%rax), %ymm0
-; AVX512F-FAST-NEXT:    vpbroadcastd 104(%rax), %ymm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm0
+; AVX512F-FAST-NEXT:    vpbroadcastd 104(%rax), %ymm6
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 64(%rcx), %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa 64(%rdx), %xmm5
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm22
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; AVX512F-FAST-NEXT:    vmovdqa %xmm13, %xmm2
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm31
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; AVX512F-FAST-NEXT:    vmovdqa %xmm8, %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm5[0],xmm0[1],xmm5[2,3],xmm0[4],xmm5[5,6],xmm0[7]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm18
 ; AVX512F-FAST-NEXT:    vmovdqa 64(%rdi), %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa 64(%rsi), %xmm5
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm13 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm15 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm15, %xmm17
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
 ; AVX512F-FAST-NEXT:    vprold $16, %xmm5, %xmm5
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2],xmm0[3,4],xmm5[5],xmm0[6,7]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm25, %xmm0
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm5
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm28, %xmm0
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
+; AVX512F-FAST-NEXT:    vmovdqa %xmm7, %xmm13
+; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm0, %xmm5
+; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,0,0,1,8,9,9,11]
 ; AVX512F-FAST-NEXT:    vpermt2q %zmm5, %zmm6, %zmm0
 ; AVX512F-FAST-NEXT:    vpbroadcastd 64(%rax), %ymm5
 ; AVX512F-FAST-NEXT:    vpbroadcastd 68(%rax), %ymm7
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm5, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm3
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm8, %zmm3
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm8
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm17
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm5
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm16
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
 ; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[1,1,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm8[0],xmm0[1],xmm8[2,3],xmm0[4],xmm8[5,6],xmm0[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm28
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm5[0],xmm0[1],xmm5[2,3],xmm0[4],xmm5[5,6],xmm0[7]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm8
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm14 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm31
-; AVX512F-FAST-NEXT:    vprold $16, %xmm8, %xmm8
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm9
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm28
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3]
+; AVX512F-FAST-NEXT:    vprold $16, %xmm9, %xmm9
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm8[2],xmm0[3,4],xmm8[5],xmm0[6,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm9[2],xmm0[3,4],xmm9[5],xmm0[6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm27
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX512F-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm8
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpermt2q %zmm8, %zmm6, %zmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm0, %xmm9
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm13, %xmm29
+; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa %xmm14, %xmm2
+; AVX512F-FAST-NEXT:    vpermt2q %zmm9, %zmm6, %zmm0
 ; AVX512F-FAST-NEXT:    vpbroadcastd (%rax), %ymm6
-; AVX512F-FAST-NEXT:    vpbroadcastd 4(%rax), %ymm8
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm1
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm5, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm1[1,1,1,1,5,5,5,5]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7,8,9],ymm5[10],ymm0[11,12],ymm5[13],ymm0[14,15]
+; AVX512F-FAST-NEXT:    vpbroadcastd 4(%rax), %ymm9
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm6, %zmm3
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm8, %zmm3
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm3[1,1,1,1,5,5,5,5]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm6[2],ymm0[3,4],ymm6[5],ymm0[6,7,8,9],ymm6[10],ymm0[11,12],ymm6[13],ymm0[14,15]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm26
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm10[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm1[2,2,2,2,6,6,6,6]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm1, %ymm12
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2,3],ymm0[4],ymm5[5,6,7,8],ymm0[9],ymm5[10,11],ymm0[12],ymm5[13,14,15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm13[10,11,8,9,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,u,u,u,u,26,27,24,25,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm3[2,2,2,2,6,6,6,6]
+; AVX512F-FAST-NEXT:    vmovdqa %ymm3, %ymm14
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0],ymm0[1],ymm6[2,3],ymm0[4],ymm6[5,6,7,8],ymm0[9],ymm6[10,11],ymm0[12],ymm6[13,14,15]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm25
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vprold $16, %ymm1, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm2[1,2,2,3,5,6,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0,1],ymm0[2],ymm5[3,4],ymm0[5],ymm5[6,7,8,9],ymm0[10],ymm5[11,12],ymm0[13],ymm5[14,15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm2[0,0,2,1,4,4,6,5]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1,2],ymm6[3],ymm5[4,5],ymm6[6],ymm5[7,8,9,10],ymm6[11],ymm5[12,13],ymm6[14],ymm5[15]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm24, %zmm5
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm8[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vprold $16, %ymm3, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm9[0,1,1,3,4,5,5,7]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm9[1,2,2,3,5,6,6,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0,1],ymm0[2],ymm6[3,4],ymm0[5],ymm6[6,7,8,9],ymm0[10],ymm6[11,12],ymm0[13],ymm6[14,15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm9[0,0,2,1,4,4,6,5]
+; AVX512F-FAST-NEXT:    vmovdqa %ymm9, %ymm10
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1,2],ymm8[3],ymm6[4,5],ymm8[6],ymm6[7,8,9,10],ymm8[11],ymm6[12,13],ymm8[14],ymm6[15]
+; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm12, %zmm6
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm9[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm12[0,1,1,3,4,5,5,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0,1],ymm0[2],ymm8[3,4],ymm0[5],ymm8[6,7,8,9],ymm0[10],ymm8[11,12],ymm0[13],ymm8[14,15]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm24
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm9[2,2,2,2,6,6,6,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm6[2],ymm0[3,4],ymm6[5],ymm0[6,7,8,9],ymm6[10],ymm0[11,12],ymm6[13],ymm0[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm19
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm18, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <u,u,4,u,u,u,5,u,u,13,u,u,u,14,u,u>
-; AVX512F-FAST-NEXT:    vpermd %zmm0, %zmm6, %zmm0
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm0
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm8 = ymm12[2,2,2,2,6,6,6,6]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm8[2],ymm0[3,4],ymm8[5],ymm0[6,7,8,9],ymm8[10],ymm0[11,12],ymm8[13],ymm0[14,15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm20
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm21, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <u,u,4,u,u,u,5,u,u,13,u,u,u,14,u,u>
+; AVX512F-FAST-NEXT:    vpermd %zmm0, %zmm8, %zmm0
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm8[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm9[3,3,3,3,7,7,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2,3],ymm0[4],ymm5[5,6,7,8],ymm0[9],ymm5[10,11],ymm0[12],ymm5[13,14,15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm9[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm1 = ymm12[3,3,3,3,7,7,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm30, %xmm0
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,3,0,1,2,3,6,7,4,5,6,7,4,5]
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm18
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm7, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm16
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm8
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm11, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm21
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm7, %xmm12
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm9
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm6
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3]
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm0
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm9[0],xmm6[1],xmm9[1],xmm6[2],xmm9[2],xmm6[3],xmm9[3]
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm12[3,3,3,3,7,7,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7,8,9,10],ymm4[11],ymm5[12,13],ymm4[14],ymm5[15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,14,15,12,13,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm14[3,3,3,3,7,7,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm4[0,1,2],ymm1[3],ymm4[4,5],ymm1[6],ymm4[7,8,9,10],ymm1[11],ymm4[12,13],ymm1[14],ymm4[15]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm23, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm1, %ymm4
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm2[3,3,3,3,7,7,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %xmm7
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [2,2,2,3,8,8,8,9]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm2, %zmm1, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [6,7,3,3,7,7,6,7]
-; AVX512F-FAST-NEXT:    vpermd %ymm20, %ymm1, %ymm2
-; AVX512F-FAST-NEXT:    vpbroadcastd 32(%rax), %ymm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm23
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm23
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm21, %xmm2
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm11, %xmm10
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm31, %xmm2
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm2, %xmm9
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm11
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3]
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm7, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm4, %xmm4
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,1,1,3,8,8,9,9]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm1, %zmm4
-; AVX512F-FAST-NEXT:    vpbroadcastd 36(%rax), %ymm0
-; AVX512F-FAST-NEXT:    vpbroadcastd 40(%rax), %ymm7
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm21
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm21
-; AVX512F-FAST-NEXT:    vprold $16, %xmm11, %xmm0
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm5[1,1,2,3]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0,1],xmm0[2],xmm4[3,4],xmm0[5],xmm4[6,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm22, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm10[3,3,3,3,7,7,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7,8,9],ymm4[10],ymm1[11,12],ymm4[13],ymm1[14,15]
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %xmm7
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %xmm8
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm11
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [2,2,2,3,8,8,8,9]
+; AVX512F-FAST-NEXT:    vpermt2q %zmm11, %zmm0, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [6,7,3,3,7,7,6,7]
+; AVX512F-FAST-NEXT:    vpermd %ymm19, %ymm0, %ymm2
+; AVX512F-FAST-NEXT:    vpbroadcastd 32(%rax), %ymm11
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm2, %zmm22
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm22
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm30, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm11[4],xmm5[4],xmm11[5],xmm5[5],xmm11[6],xmm5[6],xmm11[7],xmm5[7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm13, %xmm11
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm14, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm29, %xmm15
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm15, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm8[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm15 = xmm6[1,1,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm15[0],xmm4[1],xmm15[2,3],xmm4[4],xmm15[5,6],xmm4[7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm15, %xmm8
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm5, %xmm11
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm7
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm0
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm5, %xmm3
+; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm29, %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,1,1,3,8,8,9,9]
+; AVX512F-FAST-NEXT:    vpermt2q %zmm1, %zmm2, %zmm4
+; AVX512F-FAST-NEXT:    vpbroadcastd 36(%rax), %ymm1
+; AVX512F-FAST-NEXT:    vpbroadcastd 40(%rax), %ymm5
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm10
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm10
+; AVX512F-FAST-NEXT:    vprold $16, %xmm0, %xmm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm7[1,1,2,3]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm4[0,1],xmm1[2],xmm4[3,4],xmm1[5],xmm4[6,7]
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm15 = mem[0,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm3 = mem[2,1,3,3]
-; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm2 = mem[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm1 = mem[0,2,2,3]
-; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
-; AVX512F-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} ymm8 = ymm4[0,1,2,3,5,5,7,6,8,9,10,11,13,13,15,14]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[3,3,3,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,22,23,22,23,u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,2]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm22, %xmm4
-; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm4[0,2,3,3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [4,5,2,3,4,5,6,7,8,9,10,11,10,11,8,9]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm17, %xmm15
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm15, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm28, %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm23, %xmm3
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
+; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm9[u,u,4,5,u,u,u,u,6,7,u,u,u,u,8,9]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm6[1,1,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm4[0],xmm0[1],xmm4[2,3],xmm0[4],xmm4[5,6],xmm0[7]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm23 = mem[0,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm4 = mem[2,1,3,3]
+; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm3 = mem[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm0 = mem[0,2,2,3]
+; AVX512F-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm6[4],xmm9[4],xmm6[5],xmm9[5],xmm6[6],xmm9[6],xmm6[7],xmm9[7]
+; AVX512F-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} ymm9 = ymm5[0,1,2,3,5,5,7,6,8,9,10,11,13,13,15,14]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[3,3,3,3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,22,23,22,23,u,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,2]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm31, %xmm5
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm5[0,2,3,3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,2,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,1,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm16, %xmm5
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm5[0,2,3,3,4,5,6,7]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,2,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,1,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm17, %xmm4
-; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm4[0,2,3,3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,2,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,1,3]
-; AVX512F-FAST-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm4 = mem[2,1,3,2]
-; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,1,3]
+; AVX512F-FAST-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm6 = mem[2,1,3,2]
+; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm7 = mem[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermpd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm5 = mem[0,2,2,3]
+; AVX512F-FAST-NEXT:    vmovups %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpermpd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm5 = mem[2,1,3,3]
+; AVX512F-FAST-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpermpd $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # ymm5 = mem[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpermpd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm6 = mem[0,2,2,3]
-; AVX512F-FAST-NEXT:    vmovups %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpermpd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm6 = mem[2,1,3,3]
-; AVX512F-FAST-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpermpd $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm6 = mem[2,2,2,3]
-; AVX512F-FAST-NEXT:    vmovups %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpermpd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm6 = mem[0,2,2,3]
-; AVX512F-FAST-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpermpd $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm6 = mem[2,2,2,3]
-; AVX512F-FAST-NEXT:    vmovups %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovups %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpermpd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm5 = mem[0,2,2,3]
+; AVX512F-FAST-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm31 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm31 = mem[2,2,2,3]
 ; AVX512F-FAST-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm30 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # ymm30 = mem[0,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm31 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm31 = mem[2,1,3,2]
-; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm29 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm29 = mem[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm18[0,1,1,3]
-; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm18 = mem[0,0,1,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm10[0,0,1,1]
-; AVX512F-FAST-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm6 = mem[0,0,2,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm16[0,1,1,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm28[0,0,1,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm16 = ymm9[0,0,1,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm27[0,0,2,1]
+; AVX512F-FAST-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm29 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm29 = mem[2,1,3,2]
+; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm28 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm28 = mem[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm21[0,1,1,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm18 = ymm18[0,0,1,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm8[0,0,1,1]
+; AVX512F-FAST-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm8 = mem[0,0,2,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm21 = ymm12[0,1,1,3]
+; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm12 = mem[0,0,1,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm16 = ymm11[0,0,1,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm27[0,0,2,1]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm27 = ymm26[2,2,2,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm25[0,2,2,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm24[2,1,3,2]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm24 = ymm19[2,2,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm15, %zmm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm2, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm14
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm3 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm11 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm28, %zmm11
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm11 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm12 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm12, %zmm28, %zmm0
-; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm0 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm7
-; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm7
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm19 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm19 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm24 = ymm20[2,2,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm23, %zmm4
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm3, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm20 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm20
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm20, %ymm15
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14, %zmm4 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm23, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm1 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm13 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm13, %zmm23, %zmm2
+; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm2 # 64-byte Folded Reload
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm4 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm1, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogd $226, 124(%r8){1to8}, %ymm3, %ymm1
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm14[0,1,2,3],zmm8[0,1,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm3, %zmm8
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm12 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm22, %zmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm17, %zmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm5, %zmm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm20, %zmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm16, %zmm6
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm5, %zmm6
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm6
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm1, %zmm20
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm1, %zmm22
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm1, %zmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm29, %zmm31, %zmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm5, %zmm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm26, %zmm27, %zmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm25, %zmm6
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm5, %zmm6
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0]
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm25 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm1, %zmm25
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm20 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm5 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogd $226, 124(%r8){1to8}, %ymm4, %ymm0
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm9
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm15[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0]
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm4, %zmm9
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm13 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm19, %zmm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm17, %zmm4
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm7, %zmm4
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm21, %zmm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm16, %zmm8
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm7, %zmm8
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm0, %zmm21
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm17 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm8, %zmm0, %zmm17
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm31, %zmm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm28, %zmm29, %zmm4
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm7, %zmm4
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm26, %zmm27, %zmm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm25, %zmm8
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm7, %zmm8
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0]
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm26 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm0, %zmm26
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm29 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm1, %zmm29
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm2, %zmm3
-; AVX512F-FAST-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm1 = mem[0,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm5 = mem[0,1,1,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm1
-; AVX512F-FAST-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm5 = mem[2,1,3,3]
-; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm6 = mem[0,0,1,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm5
-; AVX512F-FAST-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm6 = mem[0,0,2,1]
-; AVX512F-FAST-NEXT:    vpermq $208, {{[-0-9]+}}(%r{{[sb]}}p), %ymm26 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm26 = mem[0,0,1,3]
-; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm27 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm27 = mem[0,0,1,1]
-; AVX512F-FAST-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
-; AVX512F-FAST-NEXT:    # xmm9 = mem[0,2,3,3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,2,1]
-; AVX512F-FAST-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm10 = mem[0,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm24 = mem[0,1,1,3]
-; AVX512F-FAST-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm14 = mem[2,1,3,3]
-; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm15 = mem[0,0,1,1]
-; AVX512F-FAST-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm16 = mem[0,0,2,1]
-; AVX512F-FAST-NEXT:    vpermq $208, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm17 = mem[0,0,1,3]
-; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm18 = mem[0,0,1,1]
-; AVX512F-FAST-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
-; AVX512F-FAST-NEXT:    # xmm13 = mem[0,2,3,3,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,2,1]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm2, %zmm5
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm10, %zmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm14, %zmm10
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm2, %zmm10
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535]
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm14 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm1, %zmm14
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm10, %zmm1, %zmm23
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm26, %zmm6, %zmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm27, %zmm2
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm28, %zmm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm16, %zmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm18, %zmm5
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm28, %zmm5
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0]
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm1, %zmm6
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm1, %zmm21
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm4
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm12
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm8, %zmm0, %zmm29
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm3, %zmm4
+; AVX512F-FAST-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm0 = mem[0,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm7 = mem[0,1,1,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm7 = mem[2,1,3,3]
+; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm8 = mem[0,0,1,1]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512F-FAST-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm8 = mem[0,0,2,1]
+; AVX512F-FAST-NEXT:    vpermq $208, {{[-0-9]+}}(%r{{[sb]}}p), %ymm27 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm27 = mem[0,0,1,3]
+; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm24 = mem[0,0,1,1]
+; AVX512F-FAST-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
+; AVX512F-FAST-NEXT:    # xmm11 = mem[0,2,3,3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,2,1]
+; AVX512F-FAST-NEXT:    vpermq $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm12 = mem[0,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm25 = mem[0,1,1,3]
+; AVX512F-FAST-NEXT:    vpermq $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm15 = mem[2,1,3,3]
+; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm16 = mem[0,0,1,1]
+; AVX512F-FAST-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm28 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm28 = mem[0,0,2,1]
+; AVX512F-FAST-NEXT:    vpermq $208, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm18 = mem[0,0,1,3]
+; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm19 = mem[0,0,1,1]
+; AVX512F-FAST-NEXT:    vpshuflw $248, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
+; AVX512F-FAST-NEXT:    # xmm14 = mem[0,2,3,3,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,2,1]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm3, %zmm7
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm12, %zmm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm15, %zmm12
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm3, %zmm12
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535]
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm15 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm7, %zmm0, %zmm15
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm12, %zmm0, %zmm22
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm27, %zmm8, %zmm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm24, %zmm3
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm23, %zmm3
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm28, %zmm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm19, %zmm7
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm23, %zmm7
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0,0,65535,65535,65535,65535,0,0]
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm0, %zmm8
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm7, %zmm0, %zmm10
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm5
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm13
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, 320(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm21, 256(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm23, 192(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, 320(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, 256(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm22, 192(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm29, 128(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, 64(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm22, (%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm20, 448(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, 704(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm14, 640(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm25, 576(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm11, 512(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm12, 384(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm19, 768(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, 832(%rax)
-; AVX512F-FAST-NEXT:    addq $2232, %rsp # imm = 0x8B8
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm17, (%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm21, 448(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, 704(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm15, 640(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm26, 576(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 512(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm13, 384(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm20, 768(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, 832(%rax)
+; AVX512F-FAST-NEXT:    addq $2200, %rsp # imm = 0x898
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
index 0ac8246d2e6d4..300a4c5aecc88 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-8.ll
@@ -130,9 +130,10 @@ define void @store_i16_stride8_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],mem[0],xmm2[1],mem[1]
 ; AVX512BW-NEXT:    vpunpckldq {{.*#+}} xmm2 = xmm3[0],mem[0],xmm3[1],mem[1]
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,2,4,6,16,18,20,22,1,3,5,7,17,19,21,23]
-; AVX512BW-NEXT:    vpermi2w %ymm1, %ymm0, %ymm2
-; AVX512BW-NEXT:    vmovdqa %ymm2, (%rax)
+; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15]
+; AVX512BW-NEXT:    vpermw %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT:    vmovdqa %ymm0, (%rax)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %in.vec0 = load <2 x i16>, ptr %in.vecptr0, align 64
@@ -283,34 +284,34 @@ define void @store_i16_stride8_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-SLOW-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX2-SLOW-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
 ; AVX2-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm4
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm5
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm2[0,1,2,0,4,5,6,4]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,7,5,6,4,8,9,10,11,15,13,14,12]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm5[0,1,2,0,4,5,6,4]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,6,4,7,5,8,9,10,11,14,12,15,13]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2],ymm3[3],ymm6[4,5],ymm3[6],ymm6[7]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm0[0,2,2,3,4,6,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[1,3,0,2,4,5,6,7,9,11,8,10,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm4[0,2,2,3,4,6,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm6[0,2,1,3,4,5,6,7,8,10,9,11,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0],ymm1[1],ymm6[2,3],ymm1[4],ymm6[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3],ymm1[4,5],ymm3[6,7]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,1,1,3,4,5,5,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,5,7,4,6,8,9,10,11,13,15,12,14]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm5[0,1,1,3,4,5,5,7]
-; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,6,5,7,8,9,10,11,12,14,13,15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7]
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm1
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm1[0,1,2,0,4,5,6,4]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,6,4,7,5,8,9,10,11,14,12,15,13]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm3[0,1,2,0,4,5,6,4]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,7,5,6,4,8,9,10,11,15,13,14,12]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3],ymm2[4,5],ymm4[6],ymm2[7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm0[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm4[0,2,1,3,4,5,6,7,8,10,9,11,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm5[0,2,2,3,4,6,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm6[1,3,0,2,4,5,6,7,9,11,8,10,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2,3],ymm6[4],ymm4[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,5,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,6,5,7,8,9,10,11,12,14,13,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,1,1,3,4,5,5,7]
+; AVX2-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,5,7,4,6,8,9,10,11,13,15,12,14]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3],ymm1[4,5],ymm3[6],ymm1[7]
 ; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[3,1,2,3,7,5,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[3,1,2,0,4,5,6,7,11,9,10,8,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm4[3,1,2,3,7,5,6,7]
-; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[2,0,3,1,4,5,6,7,10,8,11,9,12,13,14,15]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6,7]
-; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[2,0,3,1,4,5,6,7,10,8,11,9,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm5[3,1,2,3,7,5,6,7]
+; AVX2-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[3,1,2,0,4,5,6,7,11,9,10,8,12,13,14,15]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3],ymm3[4],ymm0[5,6,7]
+; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 32(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm1, (%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm2, (%rax)
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
 ;
@@ -371,26 +372,26 @@ define void @store_i16_stride8_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-PERLANE-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX2-FAST-PERLANE-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
 ; AVX2-FAST-PERLANE-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm5
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm3 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm6 = ymm5[u,u,u,u,u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,26,27]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2],ymm3[3],ymm6[4,5],ymm3[6],ymm6[7]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm6 = ymm4[0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0],ymm1[1],ymm6[2,3],ymm1[4],ymm6[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3],ymm1[4,5],ymm3[6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,30,31]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm3 = ymm4[4,5,12,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u,u,u,u,u]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6,7]
-; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,26,27]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3],ymm2[4,5],ymm4[6],ymm2[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm4 = ymm0[0,1,8,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm6 = ymm5[u,u,u,u,0,1,8,9,u,u,u,u,u,u,u,u,18,19,26,27,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2,3],ymm6[4],ymm4[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,30,31]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3],ymm1[4,5],ymm3[6],ymm1[7]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,5,12,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,4,5,12,13,u,u,u,u,u,u,u,u,22,23,30,31,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3],ymm3[4],ymm0[5,6,7]
+; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, 32(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm1, (%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, (%rax)
 ; AVX2-FAST-PERLANE-NEXT:    vzeroupper
 ; AVX2-FAST-PERLANE-NEXT:    retq
 ;
@@ -411,33 +412,33 @@ define void @store_i16_stride8_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX512F-SLOW-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
 ; AVX512F-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm4
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm5
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm2[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,5,7,4,6,8,9,10,11,13,15,12,14]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm5[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,6,5,7,8,9,10,11,12,14,13,15]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1,2],ymm3[3],ymm6[4,5],ymm3[6],ymm6[7]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm0[3,1,2,3,7,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[3,1,2,0,4,5,6,7,11,9,10,8,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm4[3,1,2,3,7,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm6[2,0,3,1,4,5,6,7,10,8,11,9,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm6[0],ymm1[1],ymm6[2,3],ymm1[4],ymm6[5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3],ymm1[4,5],ymm3[6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,7,5,6,4,8,9,10,11,15,13,14,12]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm5[0,1,2,0,4,5,6,4]
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,6,4,7,5,8,9,10,11,14,12,15,13]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3],ymm3[4,5],ymm2[6],ymm3[7]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm1[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,6,5,7,8,9,10,11,12,14,13,15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm3[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,5,7,4,6,8,9,10,11,13,15,12,14]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3],ymm2[4,5],ymm4[6],ymm2[7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm0[3,1,2,3,7,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm4[2,0,3,1,4,5,6,7,10,8,11,9,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm5[3,1,2,3,7,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm6[3,1,2,0,4,5,6,7,11,9,10,8,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2,3],ymm6[4],ymm4[5,6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,6,4,7,5,8,9,10,11,14,12,15,13]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,1,2,0,4,5,6,4]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,7,5,6,4,8,9,10,11,15,13,14,12]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3],ymm1[4,5],ymm3[6],ymm1[7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[1,3,0,2,4,5,6,7,9,11,8,10,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm4[0,2,2,3,4,6,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[0,2,1,3,4,5,6,7,8,10,9,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6,7]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,2,1,3,4,5,6,7,8,10,9,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm5[0,2,2,3,4,6,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[1,3,0,2,4,5,6,7,9,11,8,10,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2,3],ymm3[4],ymm0[5,6,7]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
@@ -499,11 +500,12 @@ define void @store_i16_stride8_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX512BW-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
+; AVX512BW-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512BW-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm1
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,4,8,12,32,36,40,44,1,5,9,13,33,37,41,45,2,6,10,14,34,38,42,46,3,7,11,15,35,39,43,47]
-; AVX512BW-NEXT:    vpermi2w %zmm1, %zmm0, %zmm2
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512BW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,4,8,12,16,20,24,28,1,5,9,13,17,21,25,29,2,6,10,14,18,22,26,30,3,7,11,15,19,23,27,31]
+; AVX512BW-NEXT:    vpermw %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
   %in.vec0 = load <4 x i16>, ptr %in.vecptr0, align 64

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
index 8b2b0adbe0498..b98c60eab6d7c 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-3.ll
@@ -48,34 +48,20 @@ define void @store_i32_stride3_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX1-ONLY-NEXT:    vzeroupper
 ; AVX1-ONLY-NEXT:    retq
 ;
-; AVX2-ONLY-LABEL: store_i32_stride3_vf2:
-; AVX2-ONLY:       # %bb.0:
-; AVX2-ONLY-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX2-ONLY-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX2-ONLY-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
-; AVX2-ONLY-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-ONLY-NEXT:    vmovaps {{.*#+}} ymm1 = <0,2,4,1,3,5,u,u>
-; AVX2-ONLY-NEXT:    vpermps %ymm0, %ymm1, %ymm0
-; AVX2-ONLY-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX2-ONLY-NEXT:    vmovlps %xmm1, 16(%rcx)
-; AVX2-ONLY-NEXT:    vmovaps %xmm0, (%rcx)
-; AVX2-ONLY-NEXT:    vzeroupper
-; AVX2-ONLY-NEXT:    retq
-;
-; AVX512-LABEL: store_i32_stride3_vf2:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,2,8,1,3,9,u,u>
-; AVX512-NEXT:    vpermi2d %ymm0, %ymm1, %ymm2
-; AVX512-NEXT:    vextracti128 $1, %ymm2, %xmm0
-; AVX512-NEXT:    vmovq %xmm0, 16(%rcx)
-; AVX512-NEXT:    vmovdqa %xmm2, (%rcx)
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX2-LABEL: store_i32_stride3_vf2:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX2-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX2-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = <0,2,4,1,3,5,u,u>
+; AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vmovlps %xmm1, 16(%rcx)
+; AVX2-NEXT:    vmovaps %xmm0, (%rcx)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
   %in.vec0 = load <2 x i32>, ptr %in.vecptr0, align 64
   %in.vec1 = load <2 x i32>, ptr %in.vecptr1, align 64
   %in.vec2 = load <2 x i32>, ptr %in.vecptr2, align 64
@@ -2802,7 +2788,7 @@ define void @store_i32_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; AVX: {{.*}}
 ; AVX1: {{.*}}
-; AVX2: {{.*}}
+; AVX2-ONLY: {{.*}}
 ; AVX512BW: {{.*}}
 ; AVX512BW-FAST: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll
index 0dbb87518fe16..12aef44e90784 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-4.ll
@@ -93,19 +93,65 @@ define void @store_i32_stride4_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX2-FAST-PERLANE-NEXT:    vzeroupper
 ; AVX2-FAST-PERLANE-NEXT:    retq
 ;
-; AVX512-LABEL: store_i32_stride4_vf2:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,2,8,10,1,3,9,11]
-; AVX512-NEXT:    vpermi2d %ymm1, %ymm0, %ymm2
-; AVX512-NEXT:    vmovdqa %ymm2, (%r8)
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX512F-SLOW-LABEL: store_i32_stride4_vf2:
+; AVX512F-SLOW:       # %bb.0:
+; AVX512F-SLOW-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-SLOW-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-SLOW-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512F-SLOW-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-SLOW-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512F-SLOW-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512F-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
+; AVX512F-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX512F-SLOW-NEXT:    vmovaps %ymm0, (%r8)
+; AVX512F-SLOW-NEXT:    vzeroupper
+; AVX512F-SLOW-NEXT:    retq
+;
+; AVX512F-FAST-LABEL: store_i32_stride4_vf2:
+; AVX512F-FAST:       # %bb.0:
+; AVX512F-FAST-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512F-FAST-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-FAST-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512F-FAST-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512F-FAST-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512F-FAST-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512F-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vmovaps {{.*#+}} ymm1 = [0,2,4,6,1,3,5,7]
+; AVX512F-FAST-NEXT:    vpermps %ymm0, %ymm1, %ymm0
+; AVX512F-FAST-NEXT:    vmovaps %ymm0, (%r8)
+; AVX512F-FAST-NEXT:    vzeroupper
+; AVX512F-FAST-NEXT:    retq
+;
+; AVX512BW-SLOW-LABEL: store_i32_stride4_vf2:
+; AVX512BW-SLOW:       # %bb.0:
+; AVX512BW-SLOW-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512BW-SLOW-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512BW-SLOW-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512BW-SLOW-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512BW-SLOW-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512BW-SLOW-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512BW-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
+; AVX512BW-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX512BW-SLOW-NEXT:    vmovaps %ymm0, (%r8)
+; AVX512BW-SLOW-NEXT:    vzeroupper
+; AVX512BW-SLOW-NEXT:    retq
+;
+; AVX512BW-FAST-LABEL: store_i32_stride4_vf2:
+; AVX512BW-FAST:       # %bb.0:
+; AVX512BW-FAST-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512BW-FAST-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512BW-FAST-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512BW-FAST-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512BW-FAST-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512BW-FAST-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512BW-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-FAST-NEXT:    vmovaps {{.*#+}} ymm1 = [0,2,4,6,1,3,5,7]
+; AVX512BW-FAST-NEXT:    vpermps %ymm0, %ymm1, %ymm0
+; AVX512BW-FAST-NEXT:    vmovaps %ymm0, (%r8)
+; AVX512BW-FAST-NEXT:    vzeroupper
+; AVX512BW-FAST-NEXT:    retq
   %in.vec0 = load <2 x i32>, ptr %in.vecptr0, align 64
   %in.vec1 = load <2 x i32>, ptr %in.vecptr1, align 64
   %in.vec2 = load <2 x i32>, ptr %in.vecptr2, align 64
@@ -193,13 +239,14 @@ define void @store_i32_stride4_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512-LABEL: store_i32_stride4_vf4:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512-NEXT:    vmovdqa (%rdx), %xmm1
-; AVX512-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
-; AVX512-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,4,16,20,1,5,17,21,2,6,18,22,3,7,19,23]
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512-NEXT:    vmovdqa64 %zmm2, (%r8)
+; AVX512-NEXT:    vmovaps (%rdi), %xmm0
+; AVX512-NEXT:    vmovaps (%rdx), %xmm1
+; AVX512-NEXT:    vinsertf128 $1, (%rcx), %ymm1, %ymm1
+; AVX512-NEXT:    vinsertf128 $1, (%rsi), %ymm0, %ymm0
+; AVX512-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT:    vmovaps {{.*#+}} zmm1 = [0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15]
+; AVX512-NEXT:    vpermps %zmm0, %zmm1, %zmm0
+; AVX512-NEXT:    vmovaps %zmm0, (%r8)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %in.vec0 = load <4 x i32>, ptr %in.vecptr0, align 64
@@ -2928,18 +2975,14 @@ define void @store_i32_stride4_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX: {{.*}}
 ; AVX1: {{.*}}
 ; AVX2: {{.*}}
-; AVX512BW-FAST: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
-; AVX512BW-SLOW: {{.*}}
 ; AVX512DQ-FAST: {{.*}}
 ; AVX512DQ-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}
 ; AVX512DQBW-SLOW: {{.*}}
-; AVX512F-FAST: {{.*}}
 ; AVX512F-ONLY-FAST: {{.*}}
 ; AVX512F-ONLY-SLOW: {{.*}}
-; AVX512F-SLOW: {{.*}}
 ; FALLBACK0: {{.*}}
 ; FALLBACK1: {{.*}}
 ; FALLBACK10: {{.*}}

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
index 92de9a0d295c2..77cd7284b97a7 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-5.ll
@@ -87,19 +87,20 @@ define void @store_i32_stride5_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512-LABEL: store_i32_stride5_vf2:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <0,2,4,6,16,1,3,5,7,17,u,u,u,u,u,u>
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512-NEXT:    vextracti32x4 $2, %zmm2, %xmm0
-; AVX512-NEXT:    vmovq %xmm0, 32(%r9)
-; AVX512-NEXT:    vmovdqa %ymm2, (%r9)
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vinsertf32x4 $2, %xmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vmovaps {{.*#+}} zmm1 = <0,2,4,6,8,1,3,5,7,9,u,u,u,u,u,u>
+; AVX512-NEXT:    vpermps %zmm0, %zmm1, %zmm0
+; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm1
+; AVX512-NEXT:    vmovlps %xmm1, 32(%r9)
+; AVX512-NEXT:    vmovaps %ymm0, (%r9)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %in.vec0 = load <2 x i32>, ptr %in.vecptr0, align 64

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
index e60de577a61bf..fdae93ac90e5b 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-6.ll
@@ -141,20 +141,21 @@ define void @store_i32_stride6_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-LABEL: store_i32_stride6_vf2:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX512-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX512-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
-; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,2,4,6,16,18,1,3,5,7,17,19,u,u,u,u>
-; AVX512-NEXT:    vpermi2d %zmm2, %zmm0, %zmm1
-; AVX512-NEXT:    vextracti32x4 $2, %zmm1, 32(%rax)
-; AVX512-NEXT:    vmovdqa %ymm1, (%rax)
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
+; AVX512-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    vinsertf32x4 $2, %xmm2, %zmm0, %zmm0
+; AVX512-NEXT:    vmovaps {{.*#+}} zmm1 = <0,2,4,6,8,10,1,3,5,7,9,11,u,u,u,u>
+; AVX512-NEXT:    vpermps %zmm0, %zmm1, %zmm0
+; AVX512-NEXT:    vextractf32x4 $2, %zmm0, 32(%rax)
+; AVX512-NEXT:    vmovaps %ymm0, (%rax)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %in.vec0 = load <2 x i32>, ptr %in.vecptr0, align 64
@@ -228,7 +229,7 @@ define void @store_i32_stride6_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm11
 ; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} ymm12 = ymm6[0],ymm11[0],ymm6[2],ymm11[2]
 ; AVX1-ONLY-NEXT:    vpermilps {{.*#+}} ymm12 = ymm12[0,2,3,1,4,6,7,5]
-; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm13 = xmm3[0],xmm1[0]
+; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm13 = xmm3[0,0],xmm1[0,0]
 ; AVX1-ONLY-NEXT:    vpermilps {{.*#+}} xmm13 = xmm13[0,1,2,0]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3],ymm12[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5],ymm12[6,7]

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
index 966b4c7f6c210..8f2eeedc7fe6c 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-7.ll
@@ -226,8 +226,9 @@ define void @store_i32_stride7_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,2,4,u>
 ; AVX512F-FAST-NEXT:    vpermi2q %ymm3, %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,2,4,6,16,18,20,1,3,5,7,17,19,21,u,u>
-; AVX512F-FAST-NEXT:    vpermi2d %zmm1, %zmm2, %zmm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,2,4,6,8,10,12,1,3,5,7,9,11,13,u,u>
+; AVX512F-FAST-NEXT:    vpermd %zmm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vextracti32x4 $2, %zmm0, 32(%rax)
 ; AVX512F-FAST-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
 ; AVX512F-FAST-NEXT:    vmovq %xmm1, 48(%rax)
@@ -277,8 +278,9 @@ define void @store_i32_stride7_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,2,4,u>
 ; AVX512BW-FAST-NEXT:    vpermi2q %ymm3, %ymm0, %ymm1
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <0,2,4,6,16,18,20,1,3,5,7,17,19,21,u,u>
-; AVX512BW-FAST-NEXT:    vpermi2d %zmm1, %zmm2, %zmm0
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm0
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <0,2,4,6,8,10,12,1,3,5,7,9,11,13,u,u>
+; AVX512BW-FAST-NEXT:    vpermd %zmm0, %zmm1, %zmm0
 ; AVX512BW-FAST-NEXT:    vextracti32x4 $2, %zmm0, 32(%rax)
 ; AVX512BW-FAST-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
 ; AVX512BW-FAST-NEXT:    vmovq %xmm1, 48(%rax)
@@ -390,7 +392,7 @@ define void @store_i32_stride7_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX1-ONLY-NEXT:    vbroadcastss (%r10), %ymm10
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm10[6,7]
 ; AVX1-ONLY-NEXT:    vunpcklps {{.*#+}} ymm5 = ymm7[0],ymm5[0],ymm7[1],ymm5[1],ymm7[4],ymm5[4],ymm7[5],ymm5[5]
-; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm4[0],xmm3[0]
+; AVX1-ONLY-NEXT:    vshufps {{.*#+}} xmm3 = xmm4[0,0],xmm3[0,0]
 ; AVX1-ONLY-NEXT:    vpermilps {{.*#+}} xmm3 = xmm3[0,1,2,0]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3],ymm5[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm8[4,5,6],ymm3[7]

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-8.ll
index 41bbf75accf05..937cd05e40629 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i32-stride-8.ll
@@ -104,23 +104,24 @@ define void @store_i32_stride8_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; AVX512-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX512-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
-; AVX512-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
-; AVX512-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
-; AVX512-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
-; AVX512-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm1
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,2,4,6,16,18,20,22,1,3,5,7,17,19,21,23]
-; AVX512-NEXT:    vpermi2d %zmm1, %zmm0, %zmm2
-; AVX512-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm4[0],xmm3[0]
+; AVX512-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX512-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT:    vmovaps {{.*#+}} zmm1 = [0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15]
+; AVX512-NEXT:    vpermps %zmm0, %zmm1, %zmm0
+; AVX512-NEXT:    vmovaps %zmm0, (%rax)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %in.vec0 = load <2 x i32>, ptr %in.vecptr0, align 64

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
index 4f2917cb12401..13142652bd421 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-4.ll
@@ -64,13 +64,14 @@ define void @store_i64_stride4_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512-LABEL: store_i64_stride4_vf2:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512-NEXT:    vmovdqa (%rdx), %xmm1
-; AVX512-NEXT:    vinserti128 $1, (%rsi), %ymm0, %ymm0
-; AVX512-NEXT:    vinserti128 $1, (%rcx), %ymm1, %ymm1
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,2,8,10,1,3,9,11]
-; AVX512-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
-; AVX512-NEXT:    vmovdqa64 %zmm2, (%r8)
+; AVX512-NEXT:    vmovaps (%rdi), %xmm0
+; AVX512-NEXT:    vmovaps (%rdx), %xmm1
+; AVX512-NEXT:    vinsertf128 $1, (%rcx), %ymm1, %ymm1
+; AVX512-NEXT:    vinsertf128 $1, (%rsi), %ymm0, %ymm0
+; AVX512-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT:    vmovaps {{.*#+}} zmm1 = [0,2,4,6,1,3,5,7]
+; AVX512-NEXT:    vpermpd %zmm0, %zmm1, %zmm0
+; AVX512-NEXT:    vmovaps %zmm0, (%r8)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %in.vec0 = load <2 x i64>, ptr %in.vecptr0, align 64

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll
index c7ce9a858bc2f..c94df69efc80d 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i64-stride-6.ll
@@ -178,23 +178,24 @@ define void @store_i64_stride6_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX1-ONLY-NEXT:    vmovaps (%rdx), %ymm2
 ; AVX1-ONLY-NEXT:    vmovapd (%r8), %ymm3
 ; AVX1-ONLY-NEXT:    vmovapd (%r9), %ymm4
-; AVX1-ONLY-NEXT:    vmovaps (%rcx), %xmm5
-; AVX1-ONLY-NEXT:    vmovaps (%rdx), %xmm6
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm6[1],xmm5[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, (%r9), %ymm7, %ymm8
-; AVX1-ONLY-NEXT:    vbroadcastsd 8(%r8), %ymm9
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm9[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm8[2,3],ymm7[4,5],ymm8[6,7]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm5 = mem[0,0]
+; AVX1-ONLY-NEXT:    vmovaps (%rsi), %xmm6
+; AVX1-ONLY-NEXT:    vmovaps (%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm8 = xmm7[1],xmm6[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm0, %ymm8
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm8 = ymm3[0,1],ymm8[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm5 = ymm8[0],ymm5[1],ymm8[2,3]
+; AVX1-ONLY-NEXT:    vmovaps (%rcx), %xmm8
+; AVX1-ONLY-NEXT:    vmovaps (%rdx), %xmm9
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm10 = xmm9[1],xmm8[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, (%r9), %ymm10, %ymm11
+; AVX1-ONLY-NEXT:    vbroadcastsd 8(%r8), %ymm12
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm12[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm11[2,3],ymm10[4,5],ymm11[6,7]
 ; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm2[0],mem[0],ymm2[2],mem[2]
-; AVX1-ONLY-NEXT:    vmovaps 16(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm8 = xmm8[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm8[0,1,2,3],ymm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm8 = mem[0,0]
-; AVX1-ONLY-NEXT:    vmovaps (%rsi), %xmm9
-; AVX1-ONLY-NEXT:    vmovaps (%rdi), %xmm10
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm11 = xmm10[1],xmm9[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm3, %ymm11
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm8 = ymm11[0,1],ymm8[2,3],ymm11[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovaps 16(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm11 = xmm11[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm11[0,1,2,3],ymm2[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm4[2,3],ymm1[2,3]
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
 ; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
@@ -204,14 +205,14 @@ define void @store_i64_stride6_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX1-ONLY-NEXT:    vbroadcastsd 24(%r8), %ymm3
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3]
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3]
-; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm10[0],xmm9[0]
-; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm6[0],xmm5[0]
+; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm3 = xmm7[0],xmm6[0]
+; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm9[0],xmm8[0]
 ; AVX1-ONLY-NEXT:    vmovaps %xmm4, 16(%rax)
 ; AVX1-ONLY-NEXT:    vmovaps %xmm3, (%rax)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm0, 128(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %ymm8, 32(%rax)
 ; AVX1-ONLY-NEXT:    vmovaps %ymm2, 96(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %ymm7, 64(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %ymm10, 64(%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm5, 32(%rax)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm1, 160(%rax)
 ; AVX1-ONLY-NEXT:    vzeroupper
 ; AVX1-ONLY-NEXT:    retq
@@ -417,93 +418,95 @@ define void @store_i64_stride6_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX1-ONLY-LABEL: store_i64_stride6_vf8:
 ; AVX1-ONLY:       # %bb.0:
-; AVX1-ONLY-NEXT:    vmovapd 32(%rdi), %ymm8
-; AVX1-ONLY-NEXT:    vmovapd 32(%rsi), %ymm14
-; AVX1-ONLY-NEXT:    vmovapd (%r8), %ymm12
+; AVX1-ONLY-NEXT:    vmovapd 32(%rdi), %ymm12
+; AVX1-ONLY-NEXT:    vmovapd (%r8), %ymm11
 ; AVX1-ONLY-NEXT:    vmovapd 32(%r8), %ymm13
-; AVX1-ONLY-NEXT:    vmovapd 32(%r9), %ymm9
-; AVX1-ONLY-NEXT:    vmovaps (%rcx), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps 32(%rcx), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps 32(%rdx), %xmm4
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm4[1],xmm2[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, 32(%r9), %ymm0, %ymm3
-; AVX1-ONLY-NEXT:    vbroadcastsd 40(%r8), %ymm5
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5],ymm3[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps (%rdx), %xmm5
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm5[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, (%r9), %ymm3, %ymm6
-; AVX1-ONLY-NEXT:    vbroadcastsd 8(%r8), %ymm7
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm7[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm6[2,3],ymm3[4,5],ymm6[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm6 = mem[0,0]
-; AVX1-ONLY-NEXT:    vmovaps (%rsi), %xmm7
-; AVX1-ONLY-NEXT:    vmovaps 32(%rsi), %xmm11
-; AVX1-ONLY-NEXT:    vmovaps (%rdi), %xmm10
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm15 = xmm10[1],xmm7[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm15, %ymm12, %ymm15
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm15[0,1],ymm6[2,3],ymm15[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX1-ONLY-NEXT:    vmovaps (%rsi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 32(%rsi), %xmm4
+; AVX1-ONLY-NEXT:    vmovaps (%rdi), %xmm3
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm3[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm11[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps (%rcx), %xmm5
+; AVX1-ONLY-NEXT:    vmovaps 32(%rcx), %xmm6
+; AVX1-ONLY-NEXT:    vmovaps 32(%rdx), %xmm7
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm7[1],xmm6[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, 32(%r9), %ymm2, %ymm8
+; AVX1-ONLY-NEXT:    vbroadcastsd 40(%r8), %ymm9
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm9[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm8[2,3],ymm2[4,5],ymm8[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm9[2,3],ymm14[2,3]
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm8 = ymm8[1],ymm14[1],ymm8[3],ymm14[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm13[2,3],ymm8[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm8 = ymm8[0],ymm15[0],ymm8[2],ymm15[3]
+; AVX1-ONLY-NEXT:    vmovaps (%rdx), %xmm9
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm8 = xmm9[1],xmm5[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, (%r9), %ymm8, %ymm10
+; AVX1-ONLY-NEXT:    vbroadcastsd 8(%r8), %ymm14
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],ymm14[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm10[2,3],ymm8[4,5],ymm10[6,7]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm10 = mem[0,0]
 ; AVX1-ONLY-NEXT:    vmovaps 32(%rdi), %xmm14
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm15 = xmm14[1],xmm11[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm15, %ymm13, %ymm13
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm15 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm15[2,3],ymm13[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovapd (%rdi), %ymm15
-; AVX1-ONLY-NEXT:    vmovapd (%rsi), %ymm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm15 = ymm15[1],ymm0[1],ymm15[3],ymm0[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm12[2,3],ymm15[2,3]
-; AVX1-ONLY-NEXT:    vmovapd (%r9), %ymm15
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm12 = ymm12[0],ymm0[0],ymm12[2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovapd 48(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 56(%r8), %ymm3
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm9 = ymm0[0,1,2],ymm9[3]
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm15 = xmm14[1],xmm4[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm15, %ymm0, %ymm15
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm15 = ymm13[0,1],ymm15[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm10 = ymm15[0],ymm10[1],ymm15[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 32(%rsi), %ymm15
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm12 = ymm12[1],ymm15[1],ymm12[3],ymm15[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm13[2,3],ymm12[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 32(%r9), %ymm0
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm0[2,3],ymm15[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm12 = ymm12[0],ymm13[0],ymm12[2],ymm13[3]
+; AVX1-ONLY-NEXT:    vmovapd (%rdi), %ymm13
+; AVX1-ONLY-NEXT:    vmovapd (%rsi), %ymm15
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm13 = ymm13[1],ymm15[1],ymm13[3],ymm15[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm11[2,3],ymm13[2,3]
+; AVX1-ONLY-NEXT:    vmovapd (%r9), %ymm1
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm1[2,3],ymm15[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm13 = ymm11[0],ymm13[0],ymm11[2],ymm13[3]
+; AVX1-ONLY-NEXT:    vmovapd 48(%rdx), %xmm11
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm11 = xmm11[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 56(%r8), %ymm15
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm11 = ymm11[0,1],ymm15[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm11 = ymm11[0,1,2],ymm0[3]
 ; AVX1-ONLY-NEXT:    vmovaps 48(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 48(%rcx), %ymm3
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm3[6,7]
-; AVX1-ONLY-NEXT:    vmovapd 16(%rdx), %xmm3
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm3[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 24(%r8), %ymm6
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm6[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm15[3]
-; AVX1-ONLY-NEXT:    vmovaps 16(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 48(%rcx), %ymm15
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm15[6,7]
+; AVX1-ONLY-NEXT:    vmovapd 16(%rdx), %xmm15
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm15 = xmm15[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 24(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm15[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3]
+; AVX1-ONLY-NEXT:    vmovaps 16(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],mem[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vbroadcastsd 16(%rcx), %ymm15
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm15[6,7]
-; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm11 = xmm14[0],xmm11[0]
-; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm4[0],xmm2[0]
-; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm10[0],xmm7[0]
-; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm5[0],xmm1[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm15[6,7]
+; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm14[0],xmm4[0]
+; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm7[0],xmm6[0]
+; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm3 = xmm3[0],mem[0]
+; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm9[0],xmm5[0]
 ; AVX1-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, 16(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm4, (%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 208(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm11, 192(%rax)
-; AVX1-ONLY-NEXT:    vmovapd %ymm12, 128(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %ymm13, 224(%rax)
-; AVX1-ONLY-NEXT:    vmovapd %ymm8, 320(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm1, 32(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %ymm6, 96(%rax)
-; AVX1-ONLY-NEXT:    vmovapd %ymm3, 160(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm5, 16(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm3, (%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm6, 208(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm4, 192(%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm13, 128(%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm12, 320(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %ymm2, 96(%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm1, 160(%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm10, 224(%rax)
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 288(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 64(%rax)
-; AVX1-ONLY-NEXT:    vmovapd %ymm9, 352(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %ymm8, 64(%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm11, 352(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 256(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 32(%rax)
 ; AVX1-ONLY-NEXT:    vzeroupper
 ; AVX1-ONLY-NEXT:    retq
 ;
@@ -1033,16 +1036,27 @@ define void @store_i64_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX1-ONLY-LABEL: store_i64_stride6_vf16:
 ; AVX1-ONLY:       # %bb.0:
-; AVX1-ONLY-NEXT:    subq $472, %rsp # imm = 0x1D8
-; AVX1-ONLY-NEXT:    vmovapd (%r8), %ymm0
+; AVX1-ONLY-NEXT:    subq $456, %rsp # imm = 0x1C8
+; AVX1-ONLY-NEXT:    vmovapd (%r8), %ymm13
+; AVX1-ONLY-NEXT:    vmovapd 32(%r8), %ymm0
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-ONLY-NEXT:    vmovaps (%rsi), %xmm3
+; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 32(%rsi), %xmm4
+; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 64(%rsi), %xmm5
+; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps (%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm13[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps (%rcx), %xmm2
 ; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 32(%rcx), %xmm4
-; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 64(%rcx), %xmm6
+; AVX1-ONLY-NEXT:    vmovaps 32(%rcx), %xmm6
 ; AVX1-ONLY-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 96(%rcx), %xmm5
-; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps (%rdx), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
@@ -1051,120 +1065,113 @@ define void @store_i64_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 32(%rdx), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, 32(%r9), %ymm1, %ymm2
-; AVX1-ONLY-NEXT:    vbroadcastsd 40(%r8), %ymm3
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-ONLY-NEXT:    vmovaps 32(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm4[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm0[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 32(%rdx), %xmm14
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm14[1],xmm6[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 40(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, 32(%r9), %ymm1, %ymm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 64(%rdx), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm6[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, 64(%r9), %ymm1, %ymm2
+; AVX1-ONLY-NEXT:    vmovaps 64(%rdi), %xmm9
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm9[1],xmm5[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vmovapd 64(%r8), %ymm1
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm1[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 64(%rcx), %xmm8
+; AVX1-ONLY-NEXT:    vmovaps 64(%rdx), %xmm7
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm7[1],xmm8[1]
 ; AVX1-ONLY-NEXT:    vbroadcastsd 72(%r8), %ymm3
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 96(%rdx), %xmm12
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm12[1],xmm5[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, 96(%r9), %ymm1, %ymm2
-; AVX1-ONLY-NEXT:    vbroadcastsd 104(%r8), %ymm3
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm3[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5],ymm2[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps (%rsi), %xmm14
-; AVX1-ONLY-NEXT:    vmovaps (%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm9[1],xmm14[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd (%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovapd (%rsi), %ymm2
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovapd (%r9), %ymm10
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm10[2,3],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 32(%r8), %ymm0
-; AVX1-ONLY-NEXT:    vmovaps 32(%rsi), %xmm7
-; AVX1-ONLY-NEXT:    vmovaps 32(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm7[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 32(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovapd 32(%rsi), %ymm2
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 32(%r9), %ymm8
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm8[2,3],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[3]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm2[0,1,2,3],ymm3[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, 64(%r9), %ymm2, %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 96(%rsi), %xmm6
+; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %xmm5
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm5[1],xmm6[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vmovapd 96(%r8), %ymm4
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 96(%rcx), %xmm3
+; AVX1-ONLY-NEXT:    vmovaps 96(%rdx), %xmm2
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm10 = xmm2[1],xmm3[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 104(%r8), %ymm11
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm11 = ymm10[0,1,2,3],ymm11[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, 96(%r9), %ymm10, %ymm10
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3],ymm11[4,5],ymm10[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd (%rdi), %ymm10
+; AVX1-ONLY-NEXT:    vmovapd (%rsi), %ymm11
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm10 = ymm10[1],ymm11[1],ymm10[3],ymm11[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm13[2,3],ymm10[2,3]
+; AVX1-ONLY-NEXT:    vmovapd (%r9), %ymm15
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm15[2,3],ymm11[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm10 = ymm10[0],ymm11[0],ymm10[2],ymm11[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 32(%rdi), %ymm10
+; AVX1-ONLY-NEXT:    vmovapd 32(%rsi), %ymm11
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm10 = ymm10[1],ymm11[1],ymm10[3],ymm11[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm10[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 32(%r9), %ymm10
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm10[2,3],ymm11[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm11[0],ymm0[2],ymm11[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 64(%r8), %ymm0
-; AVX1-ONLY-NEXT:    vmovaps 64(%rsi), %xmm5
-; AVX1-ONLY-NEXT:    vmovaps 64(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm4[1],xmm5[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 64(%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovapd 64(%rsi), %ymm2
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 64(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 64(%rsi), %ymm11
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm11[1],ymm0[3],ymm11[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm0[2,3]
 ; AVX1-ONLY-NEXT:    vmovapd 64(%r9), %ymm0
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm0[2,3],ymm11[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm11[0],ymm1[2],ymm11[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 96(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vmovaps 96(%rsi), %xmm3
-; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm11 = xmm2[1],xmm3[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm1, %ymm11
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm15 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm15[2,3],ymm11[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 96(%rdi), %ymm11
-; AVX1-ONLY-NEXT:    vmovapd 96(%rsi), %ymm15
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm11 = ymm11[1],ymm15[1],ymm11[3],ymm15[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm1[2,3],ymm11[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 96(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovapd 96(%rsi), %ymm11
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm11[1],ymm1[3],ymm11[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm4[2,3],ymm1[2,3]
 ; AVX1-ONLY-NEXT:    vmovapd 96(%r9), %ymm1
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm1[2,3],ymm15[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm11 = ymm11[0],ymm15[0],ymm11[2],ymm15[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 16(%rdi), %xmm11
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm11 = xmm11[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 16(%rcx), %ymm15
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3,4,5],ymm15[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 16(%rdx), %xmm11
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm11 = xmm11[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 24(%r8), %ymm15
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm11 = ymm11[0,1],ymm15[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm10 = ymm11[0,1,2],ymm10[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 48(%rdi), %xmm10
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm10 = xmm10[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm1[2,3],ymm11[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[0],ymm11[0],ymm4[2],ymm11[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm4, (%rsp) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 16(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm4 = xmm4[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 16(%rcx), %ymm11
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm11[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 16(%rdx), %xmm4
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm4[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 24(%r8), %ymm11
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0,1],ymm11[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0,1,2],ymm15[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 48(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm4 = xmm4[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],mem[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vbroadcastsd 48(%rcx), %ymm11
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3,4,5],ymm11[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 48(%rdx), %xmm10
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm10 = xmm10[1],mem[1]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm11 = ymm4[0,1,2,3,4,5],ymm11[6,7]
+; AVX1-ONLY-NEXT:    vmovapd 48(%rdx), %xmm4
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm4[1],mem[1]
 ; AVX1-ONLY-NEXT:    vbroadcastsd 56(%r8), %ymm15
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm10 = ymm10[0,1],ymm15[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm10 = ymm10[0,1,2],ymm8[3]
-; AVX1-ONLY-NEXT:    vmovaps 80(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm8 = xmm8[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm4 = ymm4[0,1],ymm15[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm10 = ymm4[0,1,2],ymm10[3]
+; AVX1-ONLY-NEXT:    vmovaps 80(%rdi), %xmm4
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm4 = xmm4[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],mem[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vbroadcastsd 80(%rcx), %ymm15
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5],ymm15[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5],ymm15[6,7]
 ; AVX1-ONLY-NEXT:    vmovapd 80(%rdx), %xmm15
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm15 = xmm15[1],mem[1]
 ; AVX1-ONLY-NEXT:    vbroadcastsd 88(%r8), %ymm13
@@ -1177,69 +1184,69 @@ define void @store_i64_stride6_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5],ymm15[6,7]
 ; AVX1-ONLY-NEXT:    vmovapd 112(%rdx), %xmm15
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm15 = xmm15[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 120(%r8), %ymm11
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm11 = ymm15[0,1],ymm11[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm11[0,1,2],ymm1[3]
-; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm4 = xmm4[0],xmm5[0]
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm5, %xmm5 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm5 = xmm5[0],mem[0]
-; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm6 = xmm6[0],xmm7[0]
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm7 = xmm7[0],mem[0]
+; AVX1-ONLY-NEXT:    vbroadcastsd 120(%r8), %ymm12
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm12 = ymm15[0,1],ymm12[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm12[0,1,2],ymm1[3]
+; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm9, %xmm9 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm9 = xmm9[0],mem[0]
+; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm7 = xmm7[0],xmm8[0]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm8 = xmm8[0],mem[0]
+; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm12 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm12 = xmm14[0],mem[0]
+; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm5 = xmm5[0],xmm6[0]
 ; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm12, %xmm3 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm3 = xmm12[0],mem[0]
-; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm9 = xmm9[0],xmm14[0]
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm11 = xmm11[0],mem[0]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm3 = xmm3[0],mem[0]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm6 = xmm6[0],mem[0]
 ; AVX1-ONLY-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX1-ONLY-NEXT:    vmovaps %xmm11, 16(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm9, (%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, 592(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, 576(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm7, 208(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, 192(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm5, 400(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm4, 384(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %xmm6, 16(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm3, (%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, 592(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm5, 576(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm12, 208(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm8, 192(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm7, 400(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm9, 384(%rax)
+; AVX1-ONLY-NEXT:    vmovups (%rsp), %ymm2 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm2, 704(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 608(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm2, 512(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 416(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm2, 320(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 224(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm2, 128(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm2, 32(%rax)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm1, 736(%rax)
 ; AVX1-ONLY-NEXT:    vmovaps %ymm13, 672(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm1, 640(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm1, 608(%rax)
 ; AVX1-ONLY-NEXT:    vmovapd %ymm0, 544(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %ymm8, 480(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %ymm4, 480(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 448(%rax)
-; AVX1-ONLY-NEXT:    vmovapd %ymm10, 352(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 288(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 416(%rax)
+; AVX1-ONLY-NEXT:    vmovapd %ymm10, 352(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %ymm11, 288(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 256(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 224(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 160(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 96(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 64(%rax)
-; AVX1-ONLY-NEXT:    addq $472, %rsp # imm = 0x1D8
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 32(%rax)
+; AVX1-ONLY-NEXT:    addq $456, %rsp # imm = 0x1C8
 ; AVX1-ONLY-NEXT:    vzeroupper
 ; AVX1-ONLY-NEXT:    retq
 ;
@@ -2256,330 +2263,342 @@ define void @store_i64_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-LABEL: store_i64_stride6_vf32:
 ; AVX1-ONLY:       # %bb.0:
 ; AVX1-ONLY-NEXT:    subq $1592, %rsp # imm = 0x638
-; AVX1-ONLY-NEXT:    vmovaps (%rcx), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 32(%rcx), %xmm4
-; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 64(%rcx), %xmm5
-; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 96(%rcx), %xmm3
+; AVX1-ONLY-NEXT:    vmovapd (%r8), %ymm1
+; AVX1-ONLY-NEXT:    vmovapd 32(%r8), %ymm4
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX1-ONLY-NEXT:    vmovaps (%rsi), %xmm3
 ; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 32(%rsi), %xmm9
+; AVX1-ONLY-NEXT:    vmovaps 64(%rsi), %xmm10
+; AVX1-ONLY-NEXT:    vmovaps (%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm1[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps (%rcx), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 32(%rcx), %xmm5
+; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps (%rdx), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, (%r9), %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vbroadcastsd 8(%r8), %ymm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, (%r9), %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vbroadcastsd 8(%r8), %ymm3
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5],ymm2[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX1-ONLY-NEXT:    vmovaps 32(%rdi), %xmm11
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm11[1],xmm9[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 32(%rdx), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm4[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, 32(%r9), %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm5[1]
 ; AVX1-ONLY-NEXT:    vbroadcastsd 40(%r8), %ymm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, 32(%r9), %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 64(%rdx), %xmm10
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm10[1],xmm5[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, 64(%r9), %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vmovaps 64(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm10[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 64(%r8), %ymm6
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm6[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 64(%rcx), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 64(%rdx), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
 ; AVX1-ONLY-NEXT:    vbroadcastsd 72(%r8), %ymm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, 64(%r9), %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 96(%rsi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 96(%r8), %ymm5
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm5[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 96(%rcx), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 96(%rdx), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, 96(%r9), %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
 ; AVX1-ONLY-NEXT:    vbroadcastsd 104(%r8), %ymm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, 96(%r9), %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 128(%rcx), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 128(%rsi), %xmm14
+; AVX1-ONLY-NEXT:    vmovaps 128(%rdi), %xmm15
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm15[1],xmm14[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 128(%r8), %ymm3
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 128(%rcx), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 128(%rdx), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 136(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 136(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm2[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 128(%r9), %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 160(%rcx), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 160(%rsi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 160(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 160(%r8), %ymm13
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 160(%rcx), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, (%rsp) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 160(%rdx), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 168(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 168(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm2[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 160(%r9), %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 192(%rcx), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 192(%rsi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 192(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 192(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm7 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm7[1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 192(%rcx), %xmm7
+; AVX1-ONLY-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 192(%rdx), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 200(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm7[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 200(%r8), %ymm7
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm7 = ymm0[0,1,2,3],ymm7[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 192(%r9), %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm7[0,1],ymm0[2,3],ymm7[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 224(%rcx), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 224(%rdx), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps 224(%rsi), %xmm7
+; AVX1-ONLY-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 224(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 232(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, 224(%r9), %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd (%r8), %ymm0
-; AVX1-ONLY-NEXT:    vmovaps (%rsi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps (%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd (%rdi), %ymm1
-; AVX1-ONLY-NEXT:    vmovapd (%rsi), %ymm2
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovapd (%r9), %ymm0
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm7[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm7
+; AVX1-ONLY-NEXT:    vmovapd 224(%r8), %ymm0
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm0[0,1],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm8 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm7[0],ymm8[1],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 224(%rcx), %xmm8
+; AVX1-ONLY-NEXT:    vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 224(%rdx), %xmm7
+; AVX1-ONLY-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm7[1],xmm8[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 232(%r8), %ymm12
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm12 = ymm7[0,1,2,3],ymm12[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, 224(%r9), %ymm7, %ymm7
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm7 = ymm12[0,1],ymm7[2,3],ymm12[4,5],ymm7[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd (%rdi), %ymm7
+; AVX1-ONLY-NEXT:    vmovapd (%rsi), %ymm12
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm7 = ymm7[1],ymm12[1],ymm7[3],ymm12[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vmovapd (%r9), %ymm7
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm7[2,3],ymm12[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm12[0],ymm1[2],ymm12[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 32(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vmovaps 32(%rsi), %xmm3
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 32(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm2
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm3 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 32(%rdi), %ymm2
-; AVX1-ONLY-NEXT:    vmovapd 32(%rsi), %ymm3
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm1[2,3],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 32(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovapd 32(%rsi), %ymm12
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm12[1],ymm1[3],ymm12[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm4[2,3],ymm1[2,3]
 ; AVX1-ONLY-NEXT:    vmovapd 32(%r9), %ymm1
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm1[2,3],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 64(%r8), %ymm2
-; AVX1-ONLY-NEXT:    vmovaps 64(%rsi), %xmm4
-; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 64(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, (%rsp) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm4[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm3
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm4 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 64(%rdi), %ymm3
-; AVX1-ONLY-NEXT:    vmovapd 64(%rsi), %ymm4
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm3[1],ymm4[1],ymm3[3],ymm4[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm2[2,3],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 64(%r9), %ymm2
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],ymm4[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 96(%r8), %ymm3
-; AVX1-ONLY-NEXT:    vmovaps 96(%rsi), %xmm5
-; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm4[1],xmm5[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm4
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm5 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 96(%rdi), %ymm4
-; AVX1-ONLY-NEXT:    vmovapd 96(%rsi), %ymm5
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm3[2,3],ymm4[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 96(%r9), %ymm3
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm3[2,3],ymm5[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm1[2,3],ymm12[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[0],ymm12[0],ymm4[2],ymm12[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 128(%r8), %ymm4
-; AVX1-ONLY-NEXT:    vmovaps 128(%rsi), %xmm8
-; AVX1-ONLY-NEXT:    vmovaps 128(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm9[1],xmm8[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm5
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm6 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm6[2,3],ymm5[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm5
-; AVX1-ONLY-NEXT:    vmovapd 128(%rsi), %ymm6
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm5 = ymm5[1],ymm6[1],ymm5[3],ymm6[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm4[2,3],ymm5[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 128(%r9), %ymm11
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm11[2,3],ymm6[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[3]
+; AVX1-ONLY-NEXT:    vmovapd 64(%rdi), %ymm4
+; AVX1-ONLY-NEXT:    vmovapd 64(%rsi), %ymm12
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm12[1],ymm4[3],ymm12[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm6[2,3],ymm4[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 64(%r9), %ymm6
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm6[2,3],ymm12[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[0],ymm12[0],ymm4[2],ymm12[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 160(%r8), %ymm4
-; AVX1-ONLY-NEXT:    vmovaps 160(%rsi), %xmm6
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 160(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm5[1],xmm6[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm5
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm6 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm6[2,3],ymm5[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 160(%rdi), %ymm5
-; AVX1-ONLY-NEXT:    vmovapd 160(%rsi), %ymm6
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm5 = ymm5[1],ymm6[1],ymm5[3],ymm6[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm4[2,3],ymm5[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 160(%r9), %ymm4
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm4[2,3],ymm6[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[2],ymm6[3]
+; AVX1-ONLY-NEXT:    vmovapd 96(%rdi), %ymm4
+; AVX1-ONLY-NEXT:    vmovapd 96(%rsi), %ymm12
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm12[1],ymm4[3],ymm12[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm5[2,3],ymm4[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 96(%r9), %ymm4
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm4[2,3],ymm12[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm5[0],ymm12[0],ymm5[2],ymm12[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 192(%r8), %ymm5
-; AVX1-ONLY-NEXT:    vmovaps 192(%rsi), %xmm7
-; AVX1-ONLY-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 192(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm6[1],xmm7[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm6
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm7 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 192(%rdi), %ymm6
-; AVX1-ONLY-NEXT:    vmovapd 192(%rsi), %ymm7
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm6 = ymm6[1],ymm7[1],ymm6[3],ymm7[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm5[2,3],ymm6[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 192(%r9), %ymm5
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm5[2,3],ymm7[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[2],ymm7[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 224(%r8), %ymm6
-; AVX1-ONLY-NEXT:    vmovaps 224(%rsi), %xmm13
-; AVX1-ONLY-NEXT:    vmovaps 224(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm15 = xmm12[1],xmm13[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm15, %ymm6, %ymm15
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm14 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm7 = ymm15[0,1],ymm14[2,3],ymm15[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 224(%rdi), %ymm14
-; AVX1-ONLY-NEXT:    vmovapd 224(%rsi), %ymm15
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm14 = ymm14[1],ymm15[1],ymm14[3],ymm15[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm6[2,3],ymm14[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 224(%r9), %ymm6
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm6[2,3],ymm15[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm7 = ymm14[0],ymm15[0],ymm14[2],ymm15[3]
+; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm5
+; AVX1-ONLY-NEXT:    vmovapd 128(%rsi), %ymm12
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm5 = ymm5[1],ymm12[1],ymm5[3],ymm12[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm3[2,3],ymm5[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 128(%r9), %ymm3
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm3[2,3],ymm12[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm5[0],ymm12[0],ymm5[2],ymm12[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 160(%rdi), %ymm5
+; AVX1-ONLY-NEXT:    vmovapd 160(%rsi), %ymm12
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm5 = ymm5[1],ymm12[1],ymm5[3],ymm12[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm13[2,3],ymm5[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 160(%r9), %ymm5
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm5[2,3],ymm12[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm8 = ymm13[0],ymm12[0],ymm13[2],ymm12[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 192(%rdi), %ymm12
+; AVX1-ONLY-NEXT:    vmovapd 192(%rsi), %ymm13
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm12 = ymm12[1],ymm13[1],ymm12[3],ymm13[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm2[2,3],ymm12[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 192(%r9), %ymm2
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm2[2,3],ymm13[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm8 = ymm12[0],ymm13[0],ymm12[2],ymm13[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 224(%rdi), %ymm12
+; AVX1-ONLY-NEXT:    vmovapd 224(%rsi), %ymm13
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm12 = ymm12[1],ymm13[1],ymm12[3],ymm13[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm0[2,3],ymm12[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 224(%r9), %ymm0
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm0[2,3],ymm13[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm8 = ymm12[0],ymm13[0],ymm12[2],ymm13[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 16(%rdi), %xmm12
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm12 = xmm12[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 16(%rcx), %ymm13
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm8 = ymm12[0,1,2,3,4,5],ymm13[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 16(%rdx), %xmm12
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm12 = xmm12[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 24(%r8), %ymm13
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm12[0,1,2],ymm7[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 16(%rdi), %xmm14
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm14 = xmm14[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 16(%rcx), %ymm15
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm7 = ymm14[0,1,2,3,4,5],ymm15[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 48(%rdi), %xmm7
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm7 = xmm7[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 48(%rcx), %ymm12
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5],ymm12[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 16(%rdx), %xmm14
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm14 = xmm14[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 24(%r8), %ymm15
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm14[0,1,2],ymm0[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 48(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 48(%rcx), %ymm14
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm14[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 48(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 56(%r8), %ymm14
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm14[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 80(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 80(%rcx), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 80(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 88(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 112(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 112(%rcx), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 112(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 120(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 144(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 144(%rcx), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 144(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 152(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm11[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 176(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 176(%rcx), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 48(%rdx), %xmm7
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm7[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 56(%r8), %ymm12
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm7 = ymm7[0,1],ymm12[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm7[0,1,2],ymm1[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 80(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 80(%rcx), %ymm7
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm7[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 80(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 88(%r8), %ymm7
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm6[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 112(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 112(%rcx), %ymm6
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm6[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 112(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 120(%r8), %ymm6
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm6[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 144(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 144(%rcx), %ymm4
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 144(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 152(%r8), %ymm4
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm4[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm3[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 176(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 176(%rcx), %ymm3
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 176(%rdx), %xmm1
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 184(%r8), %ymm2
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm4[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 208(%rdi), %xmm2
+; AVX1-ONLY-NEXT:    vbroadcastsd 184(%r8), %ymm3
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 208(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 208(%rcx), %ymm3
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 208(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 216(%r8), %ymm5
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm5[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm2[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 240(%rdi), %xmm2
 ; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],mem[0]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 208(%rcx), %ymm3
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm3[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 208(%rdx), %xmm3
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm3[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 216(%r8), %ymm4
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm3[0,1,2],ymm5[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 240(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm4 = xmm4[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],mem[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vbroadcastsd 240(%rcx), %ymm5
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm4[0,1,2,3,4,5],ymm5[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm5[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 240(%rdx), %xmm5
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm5[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 248(%r8), %ymm14
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm14[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm5[0,1,2],ymm6[3]
+; AVX1-ONLY-NEXT:    vbroadcastsd 248(%r8), %ymm13
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm13[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm5[0,1,2],ymm0[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm11 = xmm9[0],xmm8[0]
+; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm15 = xmm15[0],xmm14[0]
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm14 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm14 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm15 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm15 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm10 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm10 = xmm10[0],mem[0]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm13 = xmm0[0],xmm10[0]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm12 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm12 = xmm0[0],mem[0]
+; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm11 = xmm11[0],xmm9[0]
+; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm10 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    # xmm10 = xmm0[0],mem[0]
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm9 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm9 = xmm0[0],mem[0]
@@ -2590,7 +2609,7 @@ define void @store_i64_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm7 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm7 = xmm0[0],mem[0]
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm6 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm6 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm6 = xmm0[0],mem[0]
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm5 # 16-byte Folded Reload
@@ -2598,10 +2617,6 @@ define void @store_i64_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm4 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm4 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vmovlhps {{.*#+}} xmm12 = xmm12[0],xmm13[0]
-; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm13 # 16-byte Folded Reload
-; AVX1-ONLY-NEXT:    # xmm13 = xmm0[0],mem[0]
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm3 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm3 = xmm0[0],mem[0]
@@ -2619,98 +2634,98 @@ define void @store_i64_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, (%rax)
 ; AVX1-ONLY-NEXT:    vmovaps %xmm2, 1168(%rax)
 ; AVX1-ONLY-NEXT:    vmovaps %xmm3, 1152(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm13, 1360(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm12, 1344(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm4, 976(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm5, 960(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, 592(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm7, 576(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm8, 208(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm9, 192(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm10, 400(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm15, 384(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm4, 1360(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm5, 1344(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm6, 976(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm7, 960(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm8, 592(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm9, 576(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm10, 208(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm11, 192(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm12, 400(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm13, 384(%rax)
 ; AVX1-ONLY-NEXT:    vmovaps %xmm14, 784(%rax)
-; AVX1-ONLY-NEXT:    vmovaps %xmm11, 768(%rax)
+; AVX1-ONLY-NEXT:    vmovaps %xmm15, 768(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1472(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1376(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1280(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1184(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1088(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 992(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 896(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 800(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 704(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 608(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 512(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 416(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 320(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 224(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 128(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 32(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1504(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1440(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1408(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1376(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1312(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1248(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1216(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1184(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1120(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1056(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1024(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 992(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 928(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 864(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 832(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 800(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 736(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 672(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 640(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 608(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 544(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 480(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 448(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 416(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 352(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 288(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 256(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 224(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 160(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 96(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 64(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 32(%rax)
 ; AVX1-ONLY-NEXT:    addq $1592, %rsp # imm = 0x638
 ; AVX1-ONLY-NEXT:    vzeroupper
 ; AVX1-ONLY-NEXT:    retq
@@ -4802,46 +4817,101 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-LABEL: store_i64_stride6_vf64:
 ; AVX1-ONLY:       # %bb.0:
 ; AVX1-ONLY-NEXT:    subq $3464, %rsp # imm = 0xD88
+; AVX1-ONLY-NEXT:    vmovapd 32(%r8), %ymm6
+; AVX1-ONLY-NEXT:    vmovapd (%r8), %ymm7
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX1-ONLY-NEXT:    vmovaps (%rsi), %xmm2
+; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 32(%rsi), %xmm4
+; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 64(%rsi), %xmm5
+; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps (%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm7[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps (%rcx), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps (%rdx), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, (%r9), %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vbroadcastsd 8(%r8), %ymm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 8(%r8), %ymm3
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm3[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm0 = mem[0,0]
+; AVX1-ONLY-NEXT:    vmovaps 32(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm6[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 32(%rcx), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 64(%rcx), %xmm3
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 32(%rdx), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, 32(%r9), %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vbroadcastsd 40(%r8), %ymm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 40(%r8), %ymm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, 32(%r9), %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 64(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm5[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 64(%r8), %ymm10
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 64(%rcx), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, (%rsp) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 64(%rdx), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, 64(%r9), %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vbroadcastsd 72(%r8), %ymm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 72(%r8), %ymm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, 64(%r9), %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 96(%rsi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 96(%r8), %ymm8
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm8[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 96(%rcx), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 96(%rdx), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, 96(%r9), %ymm0, %ymm1
-; AVX1-ONLY-NEXT:    vbroadcastsd 104(%r8), %ymm2
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 104(%r8), %ymm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, 96(%r9), %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 128(%rsi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 128(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 128(%r8), %ymm14
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm14[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 128(%rcx), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 128(%rdx), %xmm0
@@ -4852,8 +4922,20 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 128(%r9), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 160(%rsi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 160(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovaps 160(%r8), %ymm1
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 160(%rcx), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, (%rsp) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 160(%rdx), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
@@ -4862,6 +4944,17 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 160(%r9), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 192(%rsi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 192(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 192(%r8), %ymm15
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm15[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 192(%rcx), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 192(%rdx), %xmm0
@@ -4872,6 +4965,18 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 192(%r9), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 224(%rsi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 224(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovaps 224(%r8), %ymm1
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 224(%rcx), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 224(%rdx), %xmm0
@@ -4882,6 +4987,18 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 224(%r9), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 256(%rsi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 256(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovaps 256(%r8), %ymm1
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 256(%rcx), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 256(%rdx), %xmm0
@@ -4892,6 +5009,18 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 256(%r9), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 288(%rsi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 288(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovaps 288(%r8), %ymm1
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 288(%rcx), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 288(%rdx), %xmm0
@@ -4902,6 +5031,18 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 288(%r9), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 320(%rsi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 320(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovaps 320(%r8), %ymm1
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 320(%rcx), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 320(%rdx), %xmm0
@@ -4912,6 +5053,17 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 320(%r9), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 352(%rsi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 352(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 352(%r8), %ymm11
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm11[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 352(%rcx), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 352(%rdx), %xmm0
@@ -4922,6 +5074,17 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 352(%r9), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 384(%rsi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 384(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 384(%r8), %ymm9
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm9[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 384(%rcx), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 384(%rdx), %xmm0
@@ -4932,6 +5095,17 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 384(%r9), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 416(%rsi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 416(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 416(%r8), %ymm12
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm12[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 416(%rcx), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 416(%rdx), %xmm0
@@ -4942,6 +5116,17 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 416(%r9), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 448(%rsi), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 448(%rdi), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 448(%r8), %ymm13
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm13[0,1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 448(%rcx), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 448(%rdx), %xmm0
@@ -4952,490 +5137,348 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, 448(%r9), %ymm0, %ymm0
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 480(%rcx), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps 480(%rsi), %xmm1
 ; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 480(%rdx), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps 480(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 488(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, 480(%r9), %ymm0, %ymm0
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd (%r8), %ymm0
-; AVX1-ONLY-NEXT:    vmovaps (%rsi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps (%rdi), %xmm1
-; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
+; AVX1-ONLY-NEXT:    vmovapd 480(%r8), %ymm3
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm3[0,1],ymm1[2,3]
 ; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0],ymm2[1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 480(%rcx), %xmm0
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 480(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 488(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vinsertf128 $1, 480(%r9), %ymm1, %ymm1
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd (%rdi), %ymm1
 ; AVX1-ONLY-NEXT:    vmovapd (%rsi), %ymm2
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vmovapd (%r9), %ymm0
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm7[2,3],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovapd (%r9), %ymm4
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm4[2,3],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[2],ymm2[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 32(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vmovaps 32(%rsi), %xmm3
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 32(%rdi), %xmm2
-; AVX1-ONLY-NEXT:    vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm2
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm3 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovapd 32(%rdi), %ymm2
-; AVX1-ONLY-NEXT:    vmovapd 32(%rsi), %ymm3
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm1[2,3],ymm2[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 32(%r9), %ymm1
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm1[2,3],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 64(%r8), %ymm2
-; AVX1-ONLY-NEXT:    vmovaps 64(%rsi), %xmm4
-; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 64(%rdi), %xmm3
-; AVX1-ONLY-NEXT:    vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm3 = xmm3[1],xmm4[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm3
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm4 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 64(%rdi), %ymm3
-; AVX1-ONLY-NEXT:    vmovapd 64(%rsi), %ymm4
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm3 = ymm3[1],ymm4[1],ymm3[3],ymm4[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm2[2,3],ymm3[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 64(%r9), %ymm2
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],ymm4[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[2],ymm4[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 96(%r8), %ymm3
-; AVX1-ONLY-NEXT:    vmovaps 96(%rsi), %xmm5
-; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 96(%rdi), %xmm4
-; AVX1-ONLY-NEXT:    vmovaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm4 = xmm4[1],xmm5[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm4
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm5 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 96(%rdi), %ymm4
-; AVX1-ONLY-NEXT:    vmovapd 96(%rsi), %ymm5
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm3[2,3],ymm4[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 96(%r9), %ymm3
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm3[2,3],ymm5[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 128(%r8), %ymm4
-; AVX1-ONLY-NEXT:    vmovaps 128(%rsi), %xmm6
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 128(%rdi), %xmm5
-; AVX1-ONLY-NEXT:    vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm5[1],xmm6[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm5
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm6 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm6[2,3],ymm5[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm5
-; AVX1-ONLY-NEXT:    vmovapd 128(%rsi), %ymm6
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm5 = ymm5[1],ymm6[1],ymm5[3],ymm6[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm5 = ymm4[2,3],ymm5[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 128(%r9), %ymm4
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm4[2,3],ymm6[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[2],ymm6[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 160(%r8), %ymm5
-; AVX1-ONLY-NEXT:    vmovaps 160(%rsi), %xmm7
-; AVX1-ONLY-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 160(%rdi), %xmm6
-; AVX1-ONLY-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm6[1],xmm7[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm6, %ymm5, %ymm6
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm7 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 160(%rdi), %ymm6
-; AVX1-ONLY-NEXT:    vmovapd 160(%rsi), %ymm7
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm6 = ymm6[1],ymm7[1],ymm6[3],ymm7[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm6 = ymm5[2,3],ymm6[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 160(%r9), %ymm5
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm5[2,3],ymm7[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[2],ymm7[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 192(%r8), %ymm6
-; AVX1-ONLY-NEXT:    vmovaps 192(%rsi), %xmm8
-; AVX1-ONLY-NEXT:    vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 192(%rdi), %xmm7
-; AVX1-ONLY-NEXT:    vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm7 = xmm7[1],xmm8[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm7
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm8 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm8[2,3],ymm7[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 192(%rdi), %ymm7
-; AVX1-ONLY-NEXT:    vmovapd 192(%rsi), %ymm8
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm7 = ymm7[1],ymm8[1],ymm7[3],ymm8[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm6[2,3],ymm7[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 192(%r9), %ymm6
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm6[2,3],ymm8[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 224(%r8), %ymm7
-; AVX1-ONLY-NEXT:    vmovaps 224(%rsi), %xmm9
-; AVX1-ONLY-NEXT:    vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 224(%rdi), %xmm8
-; AVX1-ONLY-NEXT:    vmovaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm8 = xmm8[1],xmm9[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm8, %ymm7, %ymm8
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm9 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 224(%rdi), %ymm8
-; AVX1-ONLY-NEXT:    vmovapd 224(%rsi), %ymm9
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm8 = ymm8[1],ymm9[1],ymm8[3],ymm9[3]
+; AVX1-ONLY-NEXT:    vmovapd 32(%rsi), %ymm0
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm0[1],ymm2[3],ymm0[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm6[2,3],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 32(%r9), %ymm5
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm5[2,3],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 64(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 64(%rsi), %ymm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm10[2,3],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 64(%r9), %ymm6
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm6[2,3],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 96(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 96(%rsi), %ymm7
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm7[1],ymm0[3],ymm7[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm8[2,3],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 96(%r9), %ymm10
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm10[2,3],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm7[0],ymm0[2],ymm7[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 128(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 128(%rsi), %ymm7
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm7[1],ymm0[3],ymm7[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm14[2,3],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 128(%r9), %ymm14
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm7 = ymm14[2,3],ymm7[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm7[0],ymm0[2],ymm7[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 160(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 160(%rsi), %ymm8
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm8[1],ymm0[3],ymm8[3]
+; AVX1-ONLY-NEXT:    vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm0 = mem[2,3],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 160(%r9), %ymm7
 ; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm7[2,3],ymm8[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 224(%r9), %ymm7
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm7[2,3],ymm9[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm8 = ymm8[0],ymm9[0],ymm8[2],ymm9[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 256(%r8), %ymm8
-; AVX1-ONLY-NEXT:    vmovaps 256(%rsi), %xmm10
-; AVX1-ONLY-NEXT:    vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 256(%rdi), %xmm9
-; AVX1-ONLY-NEXT:    vmovaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm9 = xmm9[1],xmm10[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm9, %ymm8, %ymm9
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm10 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm10[2,3],ymm9[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 256(%rdi), %ymm9
-; AVX1-ONLY-NEXT:    vmovapd 256(%rsi), %ymm10
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm9 = ymm9[1],ymm10[1],ymm9[3],ymm10[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm8[2,3],ymm9[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 256(%r9), %ymm8
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm8[2,3],ymm10[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm9 = ymm9[0],ymm10[0],ymm9[2],ymm10[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 288(%r8), %ymm9
-; AVX1-ONLY-NEXT:    vmovaps 288(%rsi), %xmm11
-; AVX1-ONLY-NEXT:    vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 288(%rdi), %xmm10
-; AVX1-ONLY-NEXT:    vmovaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm10 = xmm10[1],xmm11[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm10
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm11 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm11[2,3],ymm10[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 288(%rdi), %ymm10
-; AVX1-ONLY-NEXT:    vmovapd 288(%rsi), %ymm11
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm10 = ymm10[1],ymm11[1],ymm10[3],ymm11[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm10 = ymm9[2,3],ymm10[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 288(%r9), %ymm9
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm9[2,3],ymm11[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm10 = ymm10[0],ymm11[0],ymm10[2],ymm11[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 320(%r8), %ymm10
-; AVX1-ONLY-NEXT:    vmovaps 320(%rsi), %xmm12
-; AVX1-ONLY-NEXT:    vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 320(%rdi), %xmm11
-; AVX1-ONLY-NEXT:    vmovaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm11 = xmm11[1],xmm12[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm10, %ymm11
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm12 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm12[2,3],ymm11[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 320(%rdi), %ymm11
-; AVX1-ONLY-NEXT:    vmovapd 320(%rsi), %ymm12
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm11 = ymm11[1],ymm12[1],ymm11[3],ymm12[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm11 = ymm10[2,3],ymm11[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 320(%r9), %ymm10
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm10[2,3],ymm12[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm11 = ymm11[0],ymm12[0],ymm11[2],ymm12[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 352(%r8), %ymm11
-; AVX1-ONLY-NEXT:    vmovaps 352(%rsi), %xmm13
-; AVX1-ONLY-NEXT:    vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 352(%rdi), %xmm12
-; AVX1-ONLY-NEXT:    vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm12 = xmm12[1],xmm13[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm12, %ymm11, %ymm12
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm13 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm12 = ymm12[0,1],ymm13[2,3],ymm12[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 352(%rdi), %ymm12
-; AVX1-ONLY-NEXT:    vmovapd 352(%rsi), %ymm13
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm12 = ymm12[1],ymm13[1],ymm12[3],ymm13[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm12 = ymm11[2,3],ymm12[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 352(%r9), %ymm11
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm11[2,3],ymm13[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm12 = ymm12[0],ymm13[0],ymm12[2],ymm13[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 384(%r8), %ymm12
-; AVX1-ONLY-NEXT:    vmovaps 384(%rsi), %xmm14
-; AVX1-ONLY-NEXT:    vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 384(%rdi), %xmm13
-; AVX1-ONLY-NEXT:    vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm13 = xmm13[1],xmm14[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm13, %ymm12, %ymm13
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm14 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm14[2,3],ymm13[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 384(%rdi), %ymm13
-; AVX1-ONLY-NEXT:    vmovapd 384(%rsi), %ymm14
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm13 = ymm13[1],ymm14[1],ymm13[3],ymm14[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm12[2,3],ymm13[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 384(%r9), %ymm12
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm12[2,3],ymm14[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm13 = ymm13[0],ymm14[0],ymm13[2],ymm14[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 416(%r8), %ymm13
-; AVX1-ONLY-NEXT:    vmovaps 416(%rsi), %xmm14
-; AVX1-ONLY-NEXT:    vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 416(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm14 = xmm0[1],xmm14[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm14, %ymm13, %ymm14
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm15 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm15[2,3],ymm14[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 416(%rdi), %ymm14
-; AVX1-ONLY-NEXT:    vmovapd 416(%rsi), %ymm15
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm14 = ymm14[1],ymm15[1],ymm14[3],ymm15[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm13[2,3],ymm14[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 416(%r9), %ymm0
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm8[0],ymm0[2],ymm8[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm0[2,3],ymm15[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm13 = ymm14[0],ymm15[0],ymm14[2],ymm15[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 448(%r8), %ymm14
-; AVX1-ONLY-NEXT:    vmovaps 448(%rsi), %xmm0
-; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 448(%rdi), %xmm13
-; AVX1-ONLY-NEXT:    vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm15 = xmm13[1],xmm0[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm15, %ymm14, %ymm15
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm13 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm13 = ymm15[0,1],ymm13[2,3],ymm15[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 448(%rdi), %ymm13
-; AVX1-ONLY-NEXT:    vmovapd 448(%rsi), %ymm15
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm13 = ymm13[1],ymm15[1],ymm13[3],ymm15[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm13 = ymm14[2,3],ymm13[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 448(%r9), %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 192(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 192(%rsi), %ymm8
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm8[1],ymm0[3],ymm8[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm15[2,3],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 192(%r9), %ymm15
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm15[2,3],ymm8[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm8[0],ymm0[2],ymm8[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm0[2,3],ymm15[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm13 = ymm13[0],ymm15[0],ymm13[2],ymm15[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 480(%r8), %ymm13
-; AVX1-ONLY-NEXT:    vmovaps 480(%rsi), %xmm0
-; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 480(%rdi), %xmm14
-; AVX1-ONLY-NEXT:    vmovaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm15 = xmm14[1],xmm0[1]
-; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm15, %ymm13, %ymm15
-; AVX1-ONLY-NEXT:    vmovddup {{.*#+}} xmm14 = mem[0,0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3],ymm15[4,5,6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 480(%rdi), %ymm14
-; AVX1-ONLY-NEXT:    vmovapd 480(%rsi), %ymm15
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm14 = ymm14[1],ymm15[1],ymm14[3],ymm15[3]
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm14 = ymm13[2,3],ymm14[2,3]
-; AVX1-ONLY-NEXT:    vmovapd 480(%r9), %ymm13
-; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm15 = ymm13[2,3],ymm15[2,3]
-; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm14 = ymm14[0],ymm15[0],ymm14[2],ymm15[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 16(%rdi), %xmm14
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm14 = xmm14[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 16(%rcx), %ymm15
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3,4,5],ymm15[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 16(%rdx), %xmm14
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm14 = xmm14[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 24(%r8), %ymm15
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm0 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm0 = ymm14[0,1,2,3,4,5],mem[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 48(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 48(%rcx), %ymm14
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm14[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 48(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 56(%r8), %ymm14
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm14[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3]
+; AVX1-ONLY-NEXT:    vmovapd 224(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 224(%rsi), %ymm8
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm8[1],ymm0[3],ymm8[3]
+; AVX1-ONLY-NEXT:    vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm0 = mem[2,3],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 224(%r9), %ymm1
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm8 = ymm1[2,3],ymm8[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm8[0],ymm0[2],ymm8[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 80(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 80(%rcx), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 80(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 88(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3]
+; AVX1-ONLY-NEXT:    vmovapd 256(%rdi), %ymm8
+; AVX1-ONLY-NEXT:    vmovapd 256(%rsi), %ymm0
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm8 = ymm8[1],ymm0[1],ymm8[3],ymm0[3]
+; AVX1-ONLY-NEXT:    vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8, %ymm1 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm1 = mem[2,3],ymm8[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 256(%r9), %ymm8
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm8[2,3],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 112(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 112(%rcx), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 112(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 120(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm3[3]
+; AVX1-ONLY-NEXT:    vmovapd 288(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 288(%rsi), %ymm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1-ONLY-NEXT:    vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm2 = mem[2,3],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 288(%r9), %ymm0
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 144(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 144(%rcx), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 144(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 152(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm4[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm2[0],ymm1[0],ymm2[2],ymm1[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 176(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 176(%rcx), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 176(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 184(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm5[3]
+; AVX1-ONLY-NEXT:    vmovapd 320(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 320(%rsi), %ymm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1-ONLY-NEXT:    vperm2f128 $19, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm0 = mem[2,3],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 320(%r9), %ymm2
+; AVX1-ONLY-NEXT:    vmovupd %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 208(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 208(%rcx), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 208(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 216(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm6[3]
+; AVX1-ONLY-NEXT:    vmovapd 352(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 352(%rsi), %ymm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm11[2,3],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 352(%r9), %ymm11
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm11[2,3],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 240(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 240(%rcx), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 240(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 248(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm7[3]
+; AVX1-ONLY-NEXT:    vmovapd 384(%rdi), %ymm0
+; AVX1-ONLY-NEXT:    vmovapd 384(%rsi), %ymm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm9[2,3],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 384(%r9), %ymm0
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 272(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 272(%rcx), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm9[0],ymm1[0],ymm9[2],ymm1[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 416(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovapd 416(%rsi), %ymm9
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm9[1],ymm1[3],ymm9[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm12[2,3],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 416(%r9), %ymm12
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm12[2,3],ymm9[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm9[0],ymm1[2],ymm9[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 448(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovapd 448(%rsi), %ymm9
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm9[1],ymm1[3],ymm9[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm13[2,3],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 448(%r9), %ymm13
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm9 = ymm13[2,3],ymm9[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm9[0],ymm1[2],ymm9[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 480(%rdi), %ymm1
+; AVX1-ONLY-NEXT:    vmovapd 480(%rsi), %ymm0
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm3[2,3],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vmovapd 480(%r9), %ymm9
+; AVX1-ONLY-NEXT:    vperm2f128 {{.*#+}} ymm3 = ymm9[2,3],ymm0[2,3]
+; AVX1-ONLY-NEXT:    vshufpd {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[2],ymm3[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 16(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 16(%rcx), %ymm3
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 16(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 24(%r8), %ymm3
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 48(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 48(%rcx), %ymm3
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm3[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 48(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 56(%r8), %ymm3
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm3[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 80(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 80(%rcx), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 80(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 88(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm6[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 112(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 112(%rcx), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 112(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 120(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm10[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 144(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 144(%rcx), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 144(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 152(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm14[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 176(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 176(%rcx), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 176(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 184(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm7[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 208(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 208(%rcx), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 208(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 216(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm15[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 240(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 240(%rcx), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 240(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 248(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm1 = ymm1[0,1,2,3,4,5],mem[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 272(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 272(%rcx), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovapd 272(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 280(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm8[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 304(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 304(%rcx), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm2[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 272(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 280(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm8[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 304(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 304(%rcx), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 304(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 312(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm0 = ymm1[0,1,2,3,4,5],mem[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 304(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 312(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm9[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 336(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 336(%rcx), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 336(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 336(%rcx), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm2[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 336(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 344(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm10[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 368(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 368(%rcx), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 336(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 344(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm0 = ymm1[0,1,2,3,4,5],mem[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 368(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 376(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm11[3]
-; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 400(%rdi), %xmm0
-; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
-; AVX1-ONLY-NEXT:    vbroadcastsd 400(%rcx), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX1-ONLY-NEXT:    vmovaps 368(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 368(%rcx), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm2[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovapd 400(%rdx), %xmm0
-; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
-; AVX1-ONLY-NEXT:    vbroadcastsd 408(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm12[3]
+; AVX1-ONLY-NEXT:    vmovapd 368(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 376(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm11[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 400(%rdi), %xmm1
+; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-ONLY-NEXT:    vbroadcastsd 400(%rcx), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm2[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps 400(%rdx), %xmm1
+; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm1[1],mem[1]
+; AVX1-ONLY-NEXT:    vbroadcastsd 408(%r8), %ymm2
+; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-ONLY-NEXT:    vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
+; AVX1-ONLY-NEXT:    # ymm0 = ymm1[0,1,2,3,4,5],mem[6,7]
+; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 432(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vbroadcastsd 432(%rcx), %ymm1
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 432(%rdx), %xmm0
+; AVX1-ONLY-NEXT:    vmovapd 432(%rdx), %xmm0
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
 ; AVX1-ONLY-NEXT:    vbroadcastsd 440(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm12[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 464(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
 ; AVX1-ONLY-NEXT:    vbroadcastsd 464(%rcx), %ymm1
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX1-ONLY-NEXT:    vmovaps 464(%rdx), %xmm0
+; AVX1-ONLY-NEXT:    vmovapd 464(%rdx), %xmm0
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
 ; AVX1-ONLY-NEXT:    vbroadcastsd 472(%r8), %ymm1
-; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX1-ONLY-NEXT:    vblendps $192, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
-; AVX1-ONLY-NEXT:    # ymm0 = ymm0[0,1,2,3,4,5],mem[6,7]
-; AVX1-ONLY-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm13[3]
+; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps 496(%rdi), %xmm0
 ; AVX1-ONLY-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
 ; AVX1-ONLY-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
@@ -5446,7 +5489,7 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
 ; AVX1-ONLY-NEXT:    vbroadcastsd 504(%r8), %ymm1
 ; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
-; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm13[3]
+; AVX1-ONLY-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm9[3]
 ; AVX1-ONLY-NEXT:    vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
@@ -5469,7 +5512,7 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    # xmm0 = xmm0[0],mem[0]
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm0 = xmm0[0],mem[0]
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
@@ -5493,13 +5536,13 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    # xmm0 = xmm0[0],mem[0]
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm0 = xmm0[0],mem[0]
-; AVX1-ONLY-NEXT:    vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
 ; AVX1-ONLY-NEXT:    # xmm0 = xmm0[0],mem[0]
@@ -5583,9 +5626,9 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, 1152(%rax)
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, 1360(%rax)
-; AVX1-ONLY-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %xmm0, 1344(%rax)
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %xmm0, 1344(%rax)
+; AVX1-ONLY-NEXT:    vmovaps (%rsp), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, 976(%rax)
 ; AVX1-ONLY-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %xmm0, 960(%rax)
@@ -5612,163 +5655,163 @@ define void @store_i64_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 3008(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2912(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2816(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2720(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2624(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2528(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2432(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2336(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2240(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2144(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2048(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1952(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1856(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1760(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1664(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1568(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1472(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1376(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1280(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1184(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1088(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 992(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 896(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 800(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 704(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 608(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 512(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 416(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 320(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 224(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 128(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX1-ONLY-NEXT:    vmovaps %ymm0, 32(%rax)
-; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 3040(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2976(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2944(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2912(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2848(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2784(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2752(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2720(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2656(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2592(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2560(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2528(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2464(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2400(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2368(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2336(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2272(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2208(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2176(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2144(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2080(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 2016(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1984(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1952(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1888(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1824(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1792(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1760(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1696(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1632(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1600(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1568(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1504(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1440(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1408(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1376(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1312(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1248(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1216(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1184(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1120(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1056(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 1024(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 992(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 928(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 864(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 832(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 800(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 736(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 672(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 640(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 608(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 544(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 480(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 448(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 416(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 352(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 288(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 256(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 224(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 160(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 96(%rax)
 ; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX1-ONLY-NEXT:    vmovaps %ymm0, 64(%rax)
+; AVX1-ONLY-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX1-ONLY-NEXT:    vmovaps %ymm0, 32(%rax)
 ; AVX1-ONLY-NEXT:    addq $3464, %rsp # imm = 0xD88
 ; AVX1-ONLY-NEXT:    vzeroupper
 ; AVX1-ONLY-NEXT:    retq

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
index accdbb1515072..17ed73d9e3b16 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
@@ -164,10 +164,10 @@ define void @store_i8_stride3_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX2-ONLY-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-ONLY-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
 ; AVX2-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm2
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,8],zero,ymm0[1,9],zero,ymm0[2,10],zero,ymm0[3,11],zero,ymm0[4,12],zero,ymm0[5],zero,ymm0[21],zero,zero,ymm0[22],zero,zero,ymm0[23],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[0],zero,zero,ymm0[1],zero,zero,ymm0[2],zero,zero,ymm0[3],zero,zero,ymm0[4],zero,ymm0[29],zero,ymm0[22,30],zero,ymm0[23,31],zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,8],zero,ymm2[1,9],zero,ymm2[2,10],zero,ymm2[3,11],zero,ymm2[4,12],zero,ymm2[5],zero,ymm2[21],zero,zero,ymm2[22],zero,zero,ymm2[23],zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX2-ONLY-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-ONLY-NEXT:    vmovq %xmm1, 16(%rcx)
@@ -181,10 +181,10 @@ define void @store_i8_stride3_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX512F-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
 ; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm2
-; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,8],zero,ymm0[1,9],zero,ymm0[2,10],zero,ymm0[3,11],zero,ymm0[4,12],zero,ymm0[5],zero,ymm0[21],zero,zero,ymm0[22],zero,zero,ymm0[23,u,u,u,u,u,u,u,u]
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[0],zero,zero,ymm0[1],zero,zero,ymm0[2],zero,zero,ymm0[3],zero,zero,ymm0[4],zero,ymm0[29],zero,ymm0[22,30],zero,ymm0[23,31],zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,8],zero,ymm2[1,9],zero,ymm2[2,10],zero,ymm2[3,11],zero,ymm2[4,12],zero,ymm2[5],zero,ymm2[21],zero,zero,ymm2[22],zero,zero,ymm2[23,u,u,u,u,u,u,u,u]
 ; AVX512F-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vmovq %xmm1, 16(%rcx)
@@ -198,10 +198,10 @@ define void @store_i8_stride3_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512BW-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX512BW-NEXT:    vmovq {{.*#+}} xmm2 = mem[0],zero
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; AVX512BW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm2
-; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,8],zero,ymm0[1,9],zero,ymm0[2,10],zero,ymm0[3,11],zero,ymm0[4,12],zero,ymm0[5],zero,ymm0[21],zero,zero,ymm0[22],zero,zero,ymm0[23],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[0],zero,zero,ymm0[1],zero,zero,ymm0[2],zero,zero,ymm0[3],zero,zero,ymm0[4],zero,ymm0[29],zero,ymm0[22,30],zero,ymm0[23,31],zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,8],zero,ymm2[1,9],zero,ymm2[2,10],zero,ymm2[3,11],zero,ymm2[4,12],zero,ymm2[5],zero,ymm2[21],zero,zero,ymm2[22],zero,zero,ymm2[23],zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vmovq %xmm1, 16(%rcx)

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
index ad0813a3df1a4..04bc4e5da890f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
@@ -1874,15 +1874,16 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,1,1]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255]
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm6, %ymm11, %ymm9
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm6
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,1,1]
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3],xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,1,1]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm6, %ymm7, %ymm5
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm9[0,1,2,3]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %ymm8, %ymm7, %ymm5
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm6[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm6
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm6[0,0,1,1]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
@@ -1909,7 +1910,8 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpor %ymm5, %ymm11, %ymm5
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm5
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[0,1,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm0[12],zero,zero,zero,zero,ymm0[13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,ymm0[17],zero,zero,zero,zero,ymm0[18],zero
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm0[0,2,1,1,4,6,5,5]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,3,2]
@@ -1957,15 +1959,16 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,1,1]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255]
 ; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm5, %ymm10, %ymm8
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,1,1]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm5
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,1,1]
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm5, %ymm6, %ymm4
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm4[0,1,2,3],zmm8[0,1,2,3]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %ymm7, %ymm6, %ymm4
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm4[0,1,2,3],zmm5[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <u,0,0,0,0,u,1,1,9,9,u,10,10,10,10,u>
 ; AVX512F-FAST-NEXT:    vpermd %zmm4, %zmm7, %zmm7
@@ -1989,7 +1992,8 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpor %ymm5, %ymm11, %ymm5
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm5
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[0,1,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <4,u,5,5,5,5,u,6>
 ; AVX512F-FAST-NEXT:    vpermd %ymm4, %ymm8, %ymm8
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
@@ -2119,61 +2123,62 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %ymm3
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %ymm0
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %ymm2
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vpor %ymm4, %ymm5, %ymm4
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,ymm0[12,13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,ymm0[14,15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,ymm0[18],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
-; AVX512BW-FAST-NEXT:    vpor %ymm5, %ymm6, %ymm5
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm5
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero,ymm3[25],zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm4
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm4[8],zero,xmm4[u,7],zero,xmm4[9],zero,xmm4[u],zero,xmm4[u,10],zero,xmm4[12],zero,xmm4[u,11]
+; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %xmm6
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = zero,xmm6[8,u],zero,xmm6[7],zero,xmm6[9,u,11,u],zero,xmm6[10],zero,xmm6[12,u],zero
+; AVX512BW-FAST-NEXT:    vpor %xmm5, %xmm7, %xmm5
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm7
+; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %xmm8
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm9, %zmm5
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm5 = zmm5[0,0,1,1,4,4,5,5]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = zero,xmm8[6],zero,xmm8[8,u],zero,xmm8[7],zero,xmm8[9],zero,xmm8[11,u],zero,xmm8[10],zero,xmm8[12]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[6],zero,xmm7[8],zero,xmm7[u,7],zero,xmm7[9],zero,xmm7[11],zero,xmm7[u,10],zero,xmm7[12],zero
+; AVX512BW-FAST-NEXT:    vpor %xmm7, %xmm8, %xmm7
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm4, %zmm4
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm4 = zmm4[0,0,1,1,4,4,5,5]
+; AVX512BW-FAST-NEXT:    movabsq $3570337559743967628, %rax # imm = 0x318C631818C6318C
+; AVX512BW-FAST-NEXT:    kmovq %rax, %k1
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm5, %zmm4 {%k1}
+; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10]
+; AVX512BW-FAST-NEXT:    vpermd %zmm5, %zmm6, %zmm6
+; AVX512BW-FAST-NEXT:    movabsq $595056260442243600, %rax # imm = 0x842108421084210
+; AVX512BW-FAST-NEXT:    kmovq %rax, %k1
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm6, %zmm4 {%k1}
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vpor %ymm4, %ymm6, %ymm4
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <3,3,3,u,4,4,4,4>
-; AVX512BW-FAST-NEXT:    vpermd %ymm1, %ymm6, %ymm6
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vpor %ymm6, %ymm7, %ymm6
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,ymm0[12,13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,ymm0[14,15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,ymm0[18],zero,zero,zero
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero
+; AVX512BW-FAST-NEXT:    vpor %ymm7, %ymm8, %ymm7
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm6
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero,ymm3[25],zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vpor %ymm7, %ymm8, %ymm7
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <3,3,3,u,4,4,4,4>
+; AVX512BW-FAST-NEXT:    vpermd %ymm1, %ymm8, %ymm8
 ; AVX512BW-FAST-NEXT:    movl $138547332, %eax # imm = 0x8421084
 ; AVX512BW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm6 {%k1} = ymm3[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm6, %zmm4
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm8 {%k1} = ymm3[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm7
 ; AVX512BW-FAST-NEXT:    movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm5, %zmm4 {%k1}
-; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm6, %zmm7 {%k1}
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm5, %zmm6
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <3,3,3,3,u,4,4,4,12,14,13,13,13,13,12,14>
-; AVX512BW-FAST-NEXT:    vpermd %zmm6, %zmm7, %zmm6
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <3,3,3,3,u,4,4,4,12,14,13,13,13,13,12,14>
+; AVX512BW-FAST-NEXT:    vpermd %zmm6, %zmm8, %zmm6
 ; AVX512BW-FAST-NEXT:    movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm6, %zmm4 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm6
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm6[8],zero,xmm6[u,7],zero,xmm6[9],zero,xmm6[u],zero,xmm6[u,10],zero,xmm6[12],zero,xmm6[u,11]
-; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %xmm8
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = zero,xmm8[8,u],zero,xmm8[7],zero,xmm8[9,u,11,u],zero,xmm8[10],zero,xmm8[12,u],zero
-; AVX512BW-FAST-NEXT:    vpor %xmm7, %xmm9, %xmm7
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm9
-; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %xmm10
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8]
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [0,0,1,1,8,8,9,9]
-; AVX512BW-FAST-NEXT:    vpermt2q %zmm7, %zmm12, %zmm11
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = zero,xmm10[6],zero,xmm10[8,u],zero,xmm10[7],zero,xmm10[9],zero,xmm10[11,u],zero,xmm10[10],zero,xmm10[12]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[6],zero,xmm9[8],zero,xmm9[u,7],zero,xmm9[9],zero,xmm9[11],zero,xmm9[u,10],zero,xmm9[12],zero
-; AVX512BW-FAST-NEXT:    vpor %xmm7, %xmm9, %xmm7
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13]
-; AVX512BW-FAST-NEXT:    vpermt2q %zmm7, %zmm12, %zmm6
-; AVX512BW-FAST-NEXT:    movabsq $3570337559743967628, %rax # imm = 0x318C631818C6318C
-; AVX512BW-FAST-NEXT:    kmovq %rax, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm11, %zmm6 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10]
-; AVX512BW-FAST-NEXT:    vpermd %zmm5, %zmm7, %zmm7
-; AVX512BW-FAST-NEXT:    movabsq $595056260442243600, %rax # imm = 0x842108421084210
-; AVX512BW-FAST-NEXT:    kmovq %rax, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm7, %zmm6 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm6, %zmm7 {%k1}
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm3[26],zero,ymm3[28],zero,zero,zero,zero,ymm3[29],zero,ymm3[31],zero,zero,ymm3[30]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero
@@ -2193,8 +2198,8 @@ define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    kmovd %eax, %k1
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm1, %ymm0 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa %ymm0, 128(%r9)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm6, (%r9)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm4, 64(%r9)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm7, 64(%r9)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm4, (%r9)
 ; AVX512BW-FAST-NEXT:    vzeroupper
 ; AVX512BW-FAST-NEXT:    retq
   %in.vec0 = load <32 x i8>, ptr %in.vecptr0, align 64
@@ -4055,198 +4060,177 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512F-FAST-LABEL: store_i8_stride5_vf64:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    subq $104, %rsp
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm5
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm5, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm6
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19>
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm6, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm5, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19>
+; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm3, %ymm1
 ; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
-; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128>
-; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
+; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128>
+; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm2, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm19
 ; AVX512F-FAST-NEXT:    vpor %xmm0, %xmm1, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm12
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm12, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm7
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm7, %ymm26
-; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12>
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm3, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm28
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm7
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm7, %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm7, %xmm29
-; AVX512F-FAST-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm30
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm21
-; AVX512F-FAST-NEXT:    vpor %ymm1, %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm10
-; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm10, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm8
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm8, %ymm1
-; AVX512F-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm31
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm17
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm13
-; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm13, %xmm1
-; AVX512F-FAST-NEXT:    vporq %xmm0, %xmm1, %xmm22
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm1, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm16
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm4
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm4, %xmm15
-; AVX512F-FAST-NEXT:    vporq %xmm0, %xmm15, %xmm23
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm9
+; AVX512F-FAST-NEXT:    vpor %ymm8, %ymm9, %ymm7
+; AVX512F-FAST-NEXT:    vmovdqu %ymm7, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm8
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12>
+; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm8, %xmm12
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm7, %xmm26
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm9
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
+; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm9, %xmm13
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm7, %xmm27
+; AVX512F-FAST-NEXT:    vporq %xmm12, %xmm13, %xmm20
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm5[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm5, %zmm22
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[26],zero,ymm3[28],zero,zero,ymm3[27],zero,ymm3[29],zero,ymm3[31],zero,zero,ymm3[30],zero
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero,ymm3[25],zero,zero
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm23
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[27],zero,zero,ymm4[26],zero,ymm4[28],zero,ymm4[30],zero,zero,ymm4[29],zero,ymm4[31],zero,zero
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm4, %ymm17
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm0[19],zero,ymm0[21],zero,zero,ymm0[20],zero,ymm0[22],zero,ymm0[24],zero,zero,ymm0[23],zero
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm18
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm12
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm12, %ymm13
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm7
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm1
+; AVX512F-FAST-NEXT:    vporq %ymm13, %ymm1, %ymm21
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm13
+; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm13, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm11
+; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm11, %ymm10
+; AVX512F-FAST-NEXT:    vporq %ymm1, %ymm10, %ymm24
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm2, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm16
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm4
+; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm15
+; AVX512F-FAST-NEXT:    vporq %xmm1, %xmm15, %xmm25
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm6
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm26, %xmm1
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm6, %xmm15
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm27, %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm5
+; AVX512F-FAST-NEXT:    vporq %xmm15, %xmm5, %xmm26
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [1,1,2,2,2,2,2,2]
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm3
-; AVX512F-FAST-NEXT:    vpermd %ymm3, %ymm15, %ymm15
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
-; AVX512F-FAST-NEXT:    vpandnq %ymm15, %ymm24, %ymm15
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm3, %ymm14
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm15, %zmm24
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14>
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm14
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm18
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [2,2,3,3,10,10,11,11]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm25, %zmm14
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128>
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm6, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm20
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128>
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm6, %ymm0
-; AVX512F-FAST-NEXT:    vpermt2q %zmm1, %zmm25, %zmm0
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128>
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm26, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm14
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128>
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm12, %ymm1
-; AVX512F-FAST-NEXT:    vpermt2q %zmm14, %zmm25, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <9,u,11,u,u,10,u,12,u,14,u,u,13,u,15,u,9,u,11,u,u,10,u,12,u,14,u,u,13,u,15,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm12, %ymm15
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25>
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm12
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm5
+; AVX512F-FAST-NEXT:    vpermd %ymm5, %ymm15, %ymm15
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255]
+; AVX512F-FAST-NEXT:    vpandnq %ymm15, %ymm27, %ymm15
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm14
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm15, %zmm30
+; AVX512F-FAST-NEXT:    vmovdqa64 (%r8), %zmm31
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm27 = <4,u,5,5,5,5,u,6,30,30,30,u,31,31,31,31>
+; AVX512F-FAST-NEXT:    vpermi2d %zmm31, %zmm5, %zmm27
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = <4,u,5,5,5,5,u,6>
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm5
+; AVX512F-FAST-NEXT:    vpermd %ymm5, %ymm28, %ymm28
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
+; AVX512F-FAST-NEXT:    vpandnq %ymm28, %ymm29, %ymm28
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm3
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm28, %zmm3, %zmm28
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <9,u,11,u,u,10,u,12,u,14,u,u,13,u,15,u,9,u,11,u,u,10,u,12,u,14,u,u,13,u,15,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm14
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm12[19],zero,ymm12[21],zero,zero,ymm12[20],zero,ymm12[22],zero,ymm12[24],zero,zero,ymm12[23],zero
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm12, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25>
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm3, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm7, %ymm12
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm7[27],zero,zero,ymm7[26],zero,ymm7[28],zero,ymm7[30],zero,zero,ymm7[29],zero,ymm7[31],zero,zero
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3],xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm29 = ymm14[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm13[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm13[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[21],zero,zero,ymm11[20],zero,ymm11[22],zero,ymm11[24],zero,zero,ymm11[23],zero,ymm11[25],zero,zero
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm11[26],zero,ymm11[28],zero,zero,ymm11[27],zero,ymm11[29],zero,ymm11[31],zero,zero,ymm11[30],zero
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm19, %xmm10
+; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm15[0],xmm10[0],xmm15[1],xmm10[1],xmm15[2],xmm10[2],xmm15[3],xmm10[3],xmm15[4],xmm10[4],xmm15[5],xmm10[5],xmm15[6],xmm10[6],xmm15[7],xmm10[7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400]
-; AVX512F-FAST-NEXT:    vpandq %ymm25, %ymm15, %ymm15
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm12, %zmm12
-; AVX512F-FAST-NEXT:    vporq %zmm1, %zmm12, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm26 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm26, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 (%r8), %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = <4,u,5,5,5,5,u,6,30,30,30,u,31,31,31,31>
-; AVX512F-FAST-NEXT:    vpermi2d %zmm0, %zmm3, %zmm15
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <4,u,5,5,5,5,u,6>
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm12
-; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
-; AVX512F-FAST-NEXT:    vpandnq %ymm2, %ymm27, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm12, %ymm9
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm9, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm30, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm3, %ymm6
-; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm3, %ymm9
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm27 = ymm6[2,2,3,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm21, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm3, %ymm5
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm3, %ymm7
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm28, %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm29, %xmm14
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm6[0],xmm14[0],xmm6[1],xmm14[1],xmm6[2],xmm14[2],xmm6[3],xmm14[3],xmm6[4],xmm14[4],xmm6[5],xmm14[5],xmm6[6],xmm14[6],xmm6[7],xmm14[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm18, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm10, %ymm6
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm10, %ymm10
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm5[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm28 = ymm6[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm8, %ymm11
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm29 = ymm9[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm20, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm8, %ymm8
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
-; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm14, %xmm14
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,1,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm9 # 16-byte Folded Reload
-; AVX512F-FAST-NEXT:    # xmm9 = xmm3[0],mem[0],xmm3[1],mem[1],xmm3[2],mem[2],xmm3[3],mem[3],xmm3[4],mem[4],xmm3[5],mem[5],xmm3[6],mem[6],xmm3[7],mem[7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm9, %xmm9
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,1,1]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm17, %xmm3
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm3[0],xmm13[0],xmm3[1],xmm13[1],xmm3[2],xmm13[2],xmm3[3],xmm13[3],xmm3[4],xmm13[4],xmm3[5],xmm13[5],xmm3[6],xmm13[6],xmm3[7],xmm13[7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm13, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm16, %xmm3
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,0,1,1,8,8,9,9]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm22, %zmm4, %zmm5
-; AVX512F-FAST-NEXT:    vpermt2q %zmm23, %zmm4, %zmm3
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm4
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = <u,0,0,0,0,u,1,1,9,9,u,10,10,10,10,u>
-; AVX512F-FAST-NEXT:    vpermd %zmm4, %zmm5, %zmm4
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm4
-; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm3 = mem[0,0,1,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, (%rsp), %zmm3, %zmm3 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm5 = mem[0,0,1,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm6, %zmm5
-; AVX512F-FAST-NEXT:    vporq %ymm27, %ymm30, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm12, %zmm3
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm25, %ymm28, %ymm11
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm31, %zmm11
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm6, %zmm11
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm25, %ymm29, %ymm7
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm7, %zmm3
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm10, %ymm8
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm6
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm26, %zmm6
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm24
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm15
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <6,6,6,u,7,7,7,7,u,8,8,8,8,u,9,9>
-; AVX512F-FAST-NEXT:    vpermd %zmm0, %zmm1, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm16, %xmm10
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3],xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
+; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm15, %xmm15
+; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm4, %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
+; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm8, %xmm8
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,1,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,0,1,1]
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm25, %zmm4, %zmm4
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3],xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm26, %zmm1, %zmm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm31, %zmm5
+; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm6 = mem[0,0,1,1]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm20[0,0,1,1]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm10 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm16, %zmm10
+; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm12, %ymm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm21, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400]
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm6, %ymm14, %ymm9
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm24, %zmm9
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm16, %zmm9
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm22[2,2,3,3,6,6,7,7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm12 = zmm23[2,2,3,3,6,6,7,7]
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm12
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm6, %ymm2, %ymm7
+; AVX512F-FAST-NEXT:    vpandq %ymm6, %ymm29, %ymm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm2 = zmm18[2,2,3,3,6,6,7,7]
+; AVX512F-FAST-NEXT:    vporq %zmm2, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm12, %zmm2, %zmm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm3
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm13, %ymm11
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm11, %zmm6
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm2, %zmm6
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm30
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm27
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm28
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <6,6,6,u,7,7,7,7,u,8,8,8,8,u,9,9>
+; AVX512F-FAST-NEXT:    vpermd %zmm31, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, (%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, 64(%r9)
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm2 = zmm4[0,0,1,1,4,4,5,5]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[0,0,1,1,4,4,5,5]
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <u,0,0,0,0,u,1,1,9,9,u,10,10,10,10,u>
+; AVX512F-FAST-NEXT:    vpermd %zmm5, %zmm2, %zmm2
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm28, 64(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, (%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, 128(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm15, 256(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm24, 192(%r9)
-; AVX512F-FAST-NEXT:    addq $104, %rsp
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm27, 256(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm30, 192(%r9)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -4440,50 +4424,50 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512BW-FAST-LABEL: store_i8_stride5_vf64:
 ; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%r8), %zmm11
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%r8), %zmm5
 ; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdx), %ymm19
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm9, %ymm19, %ymm0
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rcx), %ymm12
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm10, %ymm12, %ymm1
-; AVX512BW-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
-; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %xmm1
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm1, %ymm19, %ymm0
+; AVX512BW-FAST-NEXT:    vmovdqa 32(%rcx), %ymm13
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm3
+; AVX512BW-FAST-NEXT:    vpor %ymm0, %ymm3, %ymm0
+; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %xmm6
 ; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rcx), %xmm16
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm4, %xmm16, %xmm2
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm3
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm8, %xmm16, %xmm3
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm7
 ; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdx), %xmm18
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm6, %xmm18, %xmm5
-; AVX512BW-FAST-NEXT:    vpor %xmm2, %xmm5, %xmm2
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,1,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm10, %xmm18, %xmm4
+; AVX512BW-FAST-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm9
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %xmm15
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm7, %xmm15, %xmm13
-; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %xmm5
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm15, %xmm3
+; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %xmm11
 ; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rsi), %xmm17
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm8, %xmm17, %xmm14
-; AVX512BW-FAST-NEXT:    vpor %xmm13, %xmm14, %xmm13
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm13[0,0,1,1]
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm14, %xmm17, %xmm4
+; AVX512BW-FAST-NEXT:    vpor %xmm3, %xmm4, %xmm3
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm3[0,0,1,1]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %ymm21
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <3,3,3,u,4,4,4,4>
-; AVX512BW-FAST-NEXT:    vpermd %ymm21, %ymm13, %ymm22
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <3,3,3,u,4,4,4,4>
+; AVX512BW-FAST-NEXT:    vpermd %ymm21, %ymm3, %ymm22
 ; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rsi), %ymm23
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,0,u,u,u,u,1,u,u,u,u,2,u,u,u,u>
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,0,u,u,u,u,1,u,u,u,u,2,u,u,u,u>
 ; AVX512BW-FAST-NEXT:    movl $138547332, %eax # imm = 0x8421084
 ; AVX512BW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-FAST-NEXT:    vpshufb %ymm14, %ymm23, %ymm22 {%k1}
+; AVX512BW-FAST-NEXT:    vpshufb %ymm4, %ymm23, %ymm22 {%k1}
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm22, %zmm20, %zmm20
 ; AVX512BW-FAST-NEXT:    movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k2
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm20, %zmm0 {%k2}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 32(%r8), %ymm20
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <1,1,2,2,2,2,2,2,27,27,27,27,u,28,28,28>
-; AVX512BW-FAST-NEXT:    vpermi2d %zmm11, %zmm20, %zmm22
+; AVX512BW-FAST-NEXT:    vpermi2d %zmm5, %zmm20, %zmm22
 ; AVX512BW-FAST-NEXT:    movabsq $4760450083537948804, %rax # imm = 0x4210842108421084
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm22, %zmm0 {%k3}
@@ -4497,18 +4481,18 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm19 = zmm19[0,1,2,3],mem[4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm19 = zmm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,zmm19[21],zero,zmm19[21,20],zero,zmm19[22],zero,zmm19[24],zero,zmm19[22,23],zero,zmm19[25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm19[59],zero,zero,zmm19[58],zero,zmm19[60],zero,zmm19[62],zero,zero,zmm19[61],zero,zmm19[63],zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm19 = zmm19[2,2,3,3,6,6,7,7]
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm12[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm12 = zmm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm12[19],zero,zmm12[21],zero,zero,zmm12[20],zero,zmm12[22],zero,zmm12[24],zero,zero,zmm12[23],zero,zmm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm12[59],zero,zero,zmm12[58],zero,zmm12[60],zero,zmm12[62],zero,zero,zmm12[61],zero,zmm12[63],zero,zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm12 = zmm12[2,2,3,3,6,6,7,7]
-; AVX512BW-FAST-NEXT:    vporq %zmm19, %zmm12, %zmm12
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm13 = zmm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm13[19],zero,zmm13[21],zero,zero,zmm13[20],zero,zmm13[22],zero,zmm13[24],zero,zero,zmm13[23],zero,zmm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm13[59],zero,zero,zmm13[58],zero,zmm13[60],zero,zmm13[62],zero,zero,zmm13[61],zero,zmm13[63],zero,zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm13 = zmm13[2,2,3,3,6,6,7,7]
+; AVX512BW-FAST-NEXT:    vporq %zmm19, %zmm13, %zmm13
 ; AVX512BW-FAST-NEXT:    movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm21, %zmm12 {%k3}
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm21, %zmm13 {%k3}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
-; AVX512BW-FAST-NEXT:    vpermi2d %zmm11, %zmm20, %zmm19
+; AVX512BW-FAST-NEXT:    vpermi2d %zmm5, %zmm20, %zmm19
 ; AVX512BW-FAST-NEXT:    movabsq $-8925843906633654008, %rax # imm = 0x8421084210842108
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k4
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm19, %zmm12 {%k4}
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm19, %zmm13 {%k4}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %ymm19
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm20 = ymm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm19[27],zero,zero,ymm19[26],zero,ymm19[28],zero,ymm19[30],zero,zero,ymm19[29],zero,ymm19[31],zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[2,2,3,3]
@@ -4520,9 +4504,9 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm18, %xmm16, %xmm16
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[0,0,1,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm20, %zmm16
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rsi), %ymm20
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm22 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm20[26],zero,ymm20[28],zero,zero,zero,zero,ymm20[29],zero,ymm20[31],zero,zero,ymm20[30]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm20, %zmm20
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rsi), %ymm16
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm22 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm16[26],zero,ymm16[28],zero,zero,zero,zero,ymm16[29],zero,ymm16[31],zero,zero,ymm16[30]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm22[2,2,3,3]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %ymm23
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm24 = ymm23[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm23[26],zero,ymm23[28],zero,zero,ymm23[27],zero,ymm23[29],zero,ymm23[31],zero,zero,ymm23[30],zero
@@ -4533,61 +4517,62 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm15, %xmm15
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,0,1,1]
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm22, %zmm15
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm16, %zmm15 {%k3}
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9]
-; AVX512BW-FAST-NEXT:    vpermd %zmm11, %zmm16, %zmm11
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm20, %zmm15 {%k3}
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9]
+; AVX512BW-FAST-NEXT:    vpermd %zmm5, %zmm20, %zmm5
 ; AVX512BW-FAST-NEXT:    movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm11, %zmm15 {%k3}
-; AVX512BW-FAST-NEXT:    vpshufb %ymm9, %ymm21, %ymm9
-; AVX512BW-FAST-NEXT:    vpshufb %ymm10, %ymm19, %ymm10
-; AVX512BW-FAST-NEXT:    vpor %ymm9, %ymm10, %ymm9
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm21[21],zero,ymm21[21,20],zero,ymm21[22],zero,ymm21[24],zero,ymm21[22,23],zero,ymm21[25]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm19[19],zero,ymm19[21],zero,zero,ymm19[20],zero,ymm19[22],zero,ymm19[24],zero,zero,ymm19[23],zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vpor %ymm10, %ymm11, %ymm10
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm20[21],zero,zero,ymm20[20],zero,ymm20[22],zero,ymm20[24],zero,zero,ymm20[23],zero,ymm20[25],zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm23[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm23[21],zero,zero,ymm23[20],zero,ymm23[22],zero,ymm23[24],zero,zero,ymm23[23],zero,ymm23[25],zero,zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vpor %ymm10, %ymm11, %ymm10
-; AVX512BW-FAST-NEXT:    vpermd %ymm23, %ymm13, %ymm11
-; AVX512BW-FAST-NEXT:    vpshufb %ymm14, %ymm20, %ymm11 {%k1}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm9, %zmm10 {%k2}
-; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <3,3,3,3,u,4,4,4,12,14,13,13,13,13,12,14>
-; AVX512BW-FAST-NEXT:    vpermd %zmm9, %zmm11, %zmm11
-; AVX512BW-FAST-NEXT:    movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
-; AVX512BW-FAST-NEXT:    kmovq %rax, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm11, %zmm10 {%k1}
-; AVX512BW-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm4
-; AVX512BW-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm6
-; AVX512BW-FAST-NEXT:    vpor %xmm4, %xmm6, %xmm4
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
-; AVX512BW-FAST-NEXT:    vpshufb %xmm18, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [0,0,1,1,8,8,9,9]
-; AVX512BW-FAST-NEXT:    vpermt2q %zmm4, %zmm3, %zmm1
-; AVX512BW-FAST-NEXT:    vpshufb %xmm7, %xmm2, %xmm4
-; AVX512BW-FAST-NEXT:    vpshufb %xmm8, %xmm5, %xmm6
-; AVX512BW-FAST-NEXT:    vpor %xmm4, %xmm6, %xmm4
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm2, %xmm2
-; AVX512BW-FAST-NEXT:    vpermt2q %zmm4, %zmm3, %zmm2
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm5, %zmm15 {%k3}
+; AVX512BW-FAST-NEXT:    vpshufb %xmm8, %xmm6, %xmm5
+; AVX512BW-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm8
+; AVX512BW-FAST-NEXT:    vpor %xmm5, %xmm8, %xmm5
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512BW-FAST-NEXT:    vpshufb %xmm18, %xmm6, %xmm6
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm6, %zmm5
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm5 = zmm5[0,0,1,1,4,4,5,5]
+; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm9, %xmm6
+; AVX512BW-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm7
+; AVX512BW-FAST-NEXT:    vpor %xmm6, %xmm7, %xmm6
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3],xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7]
+; AVX512BW-FAST-NEXT:    vpshufb %xmm17, %xmm7, %xmm7
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm7, %zmm6
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[0,0,1,1,4,4,5,5]
 ; AVX512BW-FAST-NEXT:    movabsq $-4165393823095705204, %rax # imm = 0xC6318C6318C6318C
-; AVX512BW-FAST-NEXT:    kmovq %rax, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm2 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10]
-; AVX512BW-FAST-NEXT:    vpermd %zmm9, %zmm1, %zmm1
+; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm5, %zmm6 {%k3}
+; AVX512BW-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10]
+; AVX512BW-FAST-NEXT:    vpermd %zmm5, %zmm7, %zmm7
 ; AVX512BW-FAST-NEXT:    movabsq $595056260442243600, %rax # imm = 0x842108421084210
+; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm7, %zmm6 {%k3}
+; AVX512BW-FAST-NEXT:    vpshufb %ymm1, %ymm21, %ymm1
+; AVX512BW-FAST-NEXT:    vpshufb %ymm2, %ymm19, %ymm2
+; AVX512BW-FAST-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm21[21],zero,ymm21[21,20],zero,ymm21[22],zero,ymm21[24],zero,ymm21[22,23],zero,ymm21[25]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm19[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm19[19],zero,ymm19[21],zero,zero,ymm19[20],zero,ymm19[22],zero,ymm19[24],zero,zero,ymm19[23],zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vpor %ymm2, %ymm7, %ymm2
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm16[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm16[21],zero,zero,ymm16[20],zero,ymm16[22],zero,ymm16[24],zero,zero,ymm16[23],zero,ymm16[25],zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm23[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm23[21],zero,zero,ymm23[20],zero,ymm23[22],zero,ymm23[24],zero,zero,ymm23[23],zero,ymm23[25],zero,zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vpor %ymm2, %ymm7, %ymm2
+; AVX512BW-FAST-NEXT:    vpermd %ymm23, %ymm3, %ymm3
+; AVX512BW-FAST-NEXT:    vpshufb %ymm4, %ymm16, %ymm3 {%k1}
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm2 {%k2}
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <3,3,3,3,u,4,4,4,12,14,13,13,13,13,12,14>
+; AVX512BW-FAST-NEXT:    vpermd %zmm5, %zmm1, %zmm1
+; AVX512BW-FAST-NEXT:    movabsq $1190112520884487201, %rax # imm = 0x1084210842108421
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k1
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm2 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm2, (%r9)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm10, 64(%r9)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm2, 64(%r9)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm6, (%r9)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm15, 128(%r9)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm12, 256(%r9)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm13, 256(%r9)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, 192(%r9)
 ; AVX512BW-FAST-NEXT:    vzeroupper
 ; AVX512BW-FAST-NEXT:    retq

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
index 5eba4928de0b8..e87dd1fe3d572 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
@@ -1784,10 +1784,10 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-LABEL: store_i8_stride6_vf32:
 ; AVX512F-FAST:       # %bb.0:
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm10
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm11
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm12
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm13
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm11
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm13
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm10
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm12
 ; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm8
 ; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm9
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm0
@@ -1795,86 +1795,90 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm12[0],ymm13[0],ymm12[1],ymm13[1],ymm12[2],ymm13[2],ymm12[3],ymm13[3],ymm12[4],ymm13[4],ymm12[5],ymm13[5],ymm12[6],ymm13[6],ymm12[7],ymm13[7],ymm12[16],ymm13[16],ymm12[17],ymm13[17],ymm12[18],ymm13[18],ymm12[19],ymm13[19],ymm12[20],ymm13[20],ymm12[21],ymm13[21],ymm12[22],ymm13[22],ymm12[23],ymm13[23]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm10[0],ymm12[0],ymm10[1],ymm12[1],ymm10[2],ymm12[2],ymm10[3],ymm12[3],ymm10[4],ymm12[4],ymm10[5],ymm12[5],ymm10[6],ymm12[6],ymm10[7],ymm12[7],ymm10[16],ymm12[16],ymm10[17],ymm12[17],ymm10[18],ymm12[18],ymm10[19],ymm12[19],ymm10[20],ymm12[20],ymm10[21],ymm12[21],ymm10[22],ymm12[22],ymm10[23],ymm12[23]
 ; AVX512F-FAST-NEXT:    vprold $16, %ymm3, %ymm3
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm3
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm7
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm5
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[4],ymm11[4],ymm10[5],ymm11[5],ymm10[6],ymm11[6],ymm10[7],ymm11[7],ymm10[16],ymm11[16],ymm10[17],ymm11[17],ymm10[18],ymm11[18],ymm10[19],ymm11[19],ymm10[20],ymm11[20],ymm10[21],ymm11[21],ymm10[22],ymm11[22],ymm10[23],ymm11[23]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,20,21,18,19,24,25,26,27,28,29,26,27]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm14
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm2, %zmm14
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm11[0],ymm13[0],ymm11[1],ymm13[1],ymm11[2],ymm13[2],ymm11[3],ymm13[3],ymm11[4],ymm13[4],ymm11[5],ymm13[5],ymm11[6],ymm13[6],ymm11[7],ymm13[7],ymm11[16],ymm13[16],ymm11[17],ymm13[17],ymm11[18],ymm13[18],ymm11[19],ymm13[19],ymm11[20],ymm13[20],ymm11[21],ymm13[21],ymm11[22],ymm13[22],ymm11[23],ymm13[23]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,20,21,18,19,24,25,26,27,28,29,26,27]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm2, %zmm14
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm17, %zmm14
 ; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm4
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,10,11,8,9,6,7,12,13,10,11,12,13,14,15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1]
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm6
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm3[8],xmm6[9],xmm3[9],xmm6[10],xmm3[10],xmm6[11],xmm3[11],xmm6[12],xmm3[12],xmm6[13],xmm3[13],xmm6[14],xmm3[14],xmm6[15],xmm3[15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[4,5,10,11,8,9,6,7,12,13,10,11,12,13,14,15]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,0,1]
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm15 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[4],ymm9[4],ymm8[5],ymm9[5],ymm8[6],ymm9[6],ymm8[7],ymm9[7],ymm8[16],ymm9[16],ymm8[17],ymm9[17],ymm8[18],ymm9[18],ymm8[19],ymm9[19],ymm8[20],ymm9[20],ymm8[21],ymm9[21],ymm8[22],ymm9[22],ymm8[23],ymm9[23]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,18,19,16,17,22,23,24,25,24,25,24,25,24,25]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm5, %zmm5
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm5
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm11, %ymm15
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm7, %zmm7
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm14, %zmm7
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm12, %ymm15
 ; AVX512F-FAST-NEXT:    vpshufb %ymm14, %ymm10, %ymm14
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm14 = ymm14[0],ymm15[0],ymm14[1],ymm15[1],ymm14[2],ymm15[2],ymm14[3],ymm15[3],ymm14[4],ymm15[4],ymm14[5],ymm15[5],ymm14[6],ymm15[6],ymm14[7],ymm15[7],ymm14[16],ymm15[16],ymm14[17],ymm15[17],ymm14[18],ymm15[18],ymm14[19],ymm15[19],ymm14[20],ymm15[20],ymm14[21],ymm15[21],ymm14[22],ymm15[22],ymm14[23],ymm15[23]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm15 = ymm12[8],ymm13[8],ymm12[9],ymm13[9],ymm12[10],ymm13[10],ymm12[11],ymm13[11],ymm12[12],ymm13[12],ymm12[13],ymm13[13],ymm12[14],ymm13[14],ymm12[15],ymm13[15],ymm12[24],ymm13[24],ymm12[25],ymm13[25],ymm12[26],ymm13[26],ymm12[27],ymm13[27],ymm12[28],ymm13[28],ymm12[29],ymm13[29],ymm12[30],ymm13[30],ymm12[31],ymm13[31]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [2,2,2,3,10,10,10,11]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm15, %zmm16, %zmm14
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm13, %ymm13
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm12, %ymm12
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm12 = ymm12[0],ymm13[0],ymm12[1],ymm13[1],ymm12[2],ymm13[2],ymm12[3],ymm13[3],ymm12[4],ymm13[4],ymm12[5],ymm13[5],ymm12[6],ymm13[6],ymm12[7],ymm13[7],ymm12[16],ymm13[16],ymm12[17],ymm13[17],ymm12[18],ymm13[18],ymm12[19],ymm13[19],ymm12[20],ymm13[20],ymm12[21],ymm13[21],ymm12[22],ymm13[22],ymm12[23],ymm13[23]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm10 = ymm10[8],ymm11[8],ymm10[9],ymm11[9],ymm10[10],ymm11[10],ymm10[11],ymm11[11],ymm10[12],ymm11[12],ymm10[13],ymm11[13],ymm10[14],ymm11[14],ymm10[15],ymm11[15],ymm10[24],ymm11[24],ymm10[25],ymm11[25],ymm10[26],ymm11[26],ymm10[27],ymm11[27],ymm10[28],ymm11[28],ymm10[29],ymm11[29],ymm10[30],ymm11[30],ymm10[31],ymm11[31]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm10, %zmm16, %zmm12
-; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm10 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-FAST-NEXT:    # zmm10 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm10, %zmm11
-; AVX512F-FAST-NEXT:    vpandq %zmm11, %zmm12, %zmm11
-; AVX512F-FAST-NEXT:    vpternlogq $242, %zmm14, %zmm10, %zmm11
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm15 = ymm11[8],ymm13[8],ymm11[9],ymm13[9],ymm11[10],ymm13[10],ymm11[11],ymm13[11],ymm11[12],ymm13[12],ymm11[13],ymm13[13],ymm11[14],ymm13[14],ymm11[15],ymm13[15],ymm11[24],ymm13[24],ymm11[25],ymm13[25],ymm11[26],ymm13[26],ymm11[27],ymm13[27],ymm11[28],ymm13[28],ymm11[29],ymm13[29],ymm11[30],ymm13[30],ymm11[31],ymm13[31]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm14, %zmm14
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm14 = zmm14[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm15 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX512F-FAST-NEXT:    # zmm15 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm15, %zmm16
+; AVX512F-FAST-NEXT:    vpandq %zmm16, %zmm14, %zmm14
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm13
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm11, %ymm2
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm13[0],ymm2[1],ymm13[1],ymm2[2],ymm13[2],ymm2[3],ymm13[3],ymm2[4],ymm13[4],ymm2[5],ymm13[5],ymm2[6],ymm13[6],ymm2[7],ymm13[7],ymm2[16],ymm13[16],ymm2[17],ymm13[17],ymm2[18],ymm13[18],ymm2[19],ymm13[19],ymm2[20],ymm13[20],ymm2[21],ymm13[21],ymm2[22],ymm13[22],ymm2[23],ymm13[23]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm10 = ymm10[8],ymm12[8],ymm10[9],ymm12[9],ymm10[10],ymm12[10],ymm10[11],ymm12[11],ymm10[12],ymm12[12],ymm10[13],ymm12[13],ymm10[14],ymm12[14],ymm10[15],ymm12[15],ymm10[24],ymm12[24],ymm10[25],ymm12[25],ymm10[26],ymm12[26],ymm10[27],ymm12[27],ymm10[28],ymm12[28],ymm10[29],ymm12[29],ymm10[30],ymm12[30],ymm10[31],ymm12[31]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm2, %zmm2
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm2 = zmm2[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vpternlogq $186, %zmm14, %zmm15, %zmm2
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u,6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm9, %ymm12
+; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm9, %ymm11
 ; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm8, %ymm10
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm10 = ymm10[0],ymm12[0],ymm10[1],ymm12[1],ymm10[2],ymm12[2],ymm10[3],ymm12[3],ymm10[4],ymm12[4],ymm10[5],ymm12[5],ymm10[6],ymm12[6],ymm10[7],ymm12[7],ymm10[16],ymm12[16],ymm10[17],ymm12[17],ymm10[18],ymm12[18],ymm10[19],ymm12[19],ymm10[20],ymm12[20],ymm10[21],ymm12[21],ymm10[22],ymm12[22],ymm10[23],ymm12[23]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm10 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[2],ymm11[2],ymm10[3],ymm11[3],ymm10[4],ymm11[4],ymm10[5],ymm11[5],ymm10[6],ymm11[6],ymm10[7],ymm11[7],ymm10[16],ymm11[16],ymm10[17],ymm11[17],ymm10[18],ymm11[18],ymm10[19],ymm11[19],ymm10[20],ymm11[20],ymm10[21],ymm11[21],ymm10[22],ymm11[22],ymm10[23],ymm11[23]
 ; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm8 = ymm8[8],ymm9[8],ymm8[9],ymm9[9],ymm8[10],ymm9[10],ymm8[11],ymm9[11],ymm8[12],ymm9[12],ymm8[13],ymm9[13],ymm8[14],ymm9[14],ymm8[15],ymm9[15],ymm8[24],ymm9[24],ymm8[25],ymm9[25],ymm8[26],ymm9[26],ymm8[27],ymm9[27],ymm8[28],ymm9[28],ymm8[29],ymm9[29],ymm8[30],ymm9[30],ymm8[31],ymm9[31]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27,24,25,22,23,28,29,26,27,28,29,30,31]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm8, %zmm16, %zmm10
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm10
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm9
-; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm8
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3],xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm10, %zmm8
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm8 = zmm8[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm8
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm9
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm2
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1],xmm2[2],xmm9[2],xmm2[3],xmm9[3],xmm2[4],xmm9[4],xmm2[5],xmm9[5],xmm2[6],xmm9[6],xmm2[7],xmm9[7]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[0,1,6,7,4,5,2,3,8,9,10,11,12,13,10,11]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,0,0,1,8,8,8,9]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm8, %zmm11, %zmm9
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm6, %xmm6
-; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm9, %zmm2
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm2 = zmm2[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm4
+; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm5
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
 ; AVX512F-FAST-NEXT:    vprold $16, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpermt2q %zmm6, %zmm11, %zmm0
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm6
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[4,5,2,3,0,1,6,7,8,9,8,9,8,9,8,9]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm1, %zmm11, %zmm3
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm2, %zmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, 128(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, 64(%rax)
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,5,2,3,0,1,6,7,8,9,8,9,8,9,8,9]
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm1
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm17, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, 128(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, 64(%rax)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -1904,16 +1908,17 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    movw $18724, %cx # imm = 0x4924
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
 ; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm6, %ymm10 {%k1}
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3],xmm9[4],xmm7[4],xmm9[5],xmm7[5],xmm9[6],xmm7[6],xmm9[7],xmm7[7]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm6
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3],xmm9[4],xmm7[4],xmm9[5],xmm7[5],xmm9[6],xmm7[6],xmm9[7],xmm7[7]
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm6, %ymm11, %ymm6
+; AVX512BW-SLOW-NEXT:    vpermw %ymm10, %ymm11, %ymm10
 ; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm12[0],xmm8[0],xmm12[1],xmm8[1],xmm12[2],xmm8[2],xmm12[3],xmm8[3],xmm12[4],xmm8[4],xmm12[5],xmm8[5],xmm12[6],xmm8[6],xmm12[7],xmm8[7]
 ; AVX512BW-SLOW-NEXT:    vprold $16, %xmm11, %xmm11
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,0,1]
 ; AVX512BW-SLOW-NEXT:    movw $9362, %cx # imm = 0x2492
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm11, %ymm6 {%k2}
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm6[0,1,2,3],zmm10[0,1,2,3]
+; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm11, %ymm10 {%k2}
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm10[0,1,2,3],zmm6[4,5,6,7]
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%r9), %xmm10
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u>
 ; AVX512BW-SLOW-NEXT:    vpshufb %xmm11, %xmm10, %xmm13
@@ -1935,29 +1940,31 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vprold $16, %ymm13, %ymm13
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,2,2,3]
 ; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm13, %ymm11 {%k2}
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm11
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm12[8],xmm8[8],xmm12[9],xmm8[9],xmm12[10],xmm8[10],xmm12[11],xmm8[11],xmm12[12],xmm8[12],xmm12[13],xmm8[13],xmm12[14],xmm8[14],xmm12[15],xmm8[15]
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm9[8],xmm7[8],xmm9[9],xmm7[9],xmm9[10],xmm7[10],xmm9[11],xmm7[11],xmm9[12],xmm7[12],xmm9[13],xmm7[13],xmm9[14],xmm7[14],xmm9[15],xmm7[15]
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
 ; AVX512BW-SLOW-NEXT:    vpermw %ymm7, %ymm9, %ymm7
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
 ; AVX512BW-SLOW-NEXT:    vpermw %ymm8, %ymm9, %ymm7 {%k1}
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm11[0,1,2,3]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,2,3],zmm11[4,5,6,7]
 ; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm8 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm14[8],xmm10[8],xmm14[9],xmm10[9],xmm14[10],xmm10[10],xmm14[11],xmm10[11],xmm14[12],xmm10[12],xmm14[13],xmm10[13],xmm14[14],xmm10[14],xmm14[15],xmm10[15]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,42,41,40,43,42,41,40,43,42,41,40,43,44,44,44,44]
-; AVX512BW-SLOW-NEXT:    vpermi2w %zmm8, %zmm9, %zmm10
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm8
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
 ; AVX512BW-SLOW-NEXT:    movl $1227133513, %ecx # imm = 0x49249249
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm10, %zmm7 {%k2}
+; AVX512BW-SLOW-NEXT:    vpermw %zmm8, %zmm9, %zmm7 {%k2}
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm8 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm9 = ymm4[8],ymm5[8],ymm4[9],ymm5[9],ymm4[10],ymm5[10],ymm4[11],ymm5[11],ymm4[12],ymm5[12],ymm4[13],ymm5[13],ymm4[14],ymm5[14],ymm4[15],ymm5[15],ymm4[24],ymm5[24],ymm4[25],ymm5[25],ymm4[26],ymm5[26],ymm4[27],ymm5[27],ymm4[28],ymm5[28],ymm4[29],ymm5[29],ymm4[30],ymm5[30],ymm4[31],ymm5[31]
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
 ; AVX512BW-SLOW-NEXT:    vpermw %ymm9, %ymm10, %ymm9
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
 ; AVX512BW-SLOW-NEXT:    vpermw %ymm8, %ymm10, %ymm9 {%k1}
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm8, %ymm5, %ymm5
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm8, %ymm4, %ymm4
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm8
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm9, %ymm5, %ymm5
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm9, %ymm4, %ymm4
 ; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[16],ymm5[16],ymm4[17],ymm5[17],ymm4[18],ymm5[18],ymm4[19],ymm5[19],ymm4[20],ymm5[20],ymm4[21],ymm5[21],ymm4[22],ymm5[22],ymm4[23],ymm5[23]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
@@ -1966,7 +1973,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
 ; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm4, %ymm2 {%k1}
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm9[0,1,2,3]
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm8[4,5,6,7]
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u,6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u>
 ; AVX512BW-SLOW-NEXT:    vpshufb %ymm3, %ymm1, %ymm4
 ; AVX512BW-SLOW-NEXT:    vpshufb %ymm3, %ymm0, %ymm3
@@ -2019,7 +2026,8 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    movw $18724, %cx # imm = 0x4924
 ; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
 ; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm6, %ymm12 {%k1}
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm11[0,1,2,3],zmm12[0,1,2,3]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm6
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm11[0,1,2,3],zmm6[4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %xmm11
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u>
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm11, %xmm13
@@ -2040,29 +2048,31 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vpermw %ymm13, %ymm15, %ymm13
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = [9,8,11,10,9,8,11,10,9,8,11,10,13,12,15,14]
 ; AVX512BW-FAST-NEXT:    vpermw %ymm12, %ymm15, %ymm13 {%k2}
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm12
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7]
 ; AVX512BW-FAST-NEXT:    vpermw %ymm8, %ymm9, %ymm8
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
 ; AVX512BW-FAST-NEXT:    vpermw %ymm7, %ymm9, %ymm8 {%k1}
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm8[0,1,2,3],zmm13[0,1,2,3]
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm8[0,1,2,3],zmm12[4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm8 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm14[8],xmm11[8],xmm14[9],xmm11[9],xmm14[10],xmm11[10],xmm14[11],xmm11[11],xmm14[12],xmm11[12],xmm14[13],xmm11[13],xmm14[14],xmm11[14],xmm14[15],xmm11[15]
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,42,41,40,43,42,41,40,43,42,41,40,43,44,44,44,44]
-; AVX512BW-FAST-NEXT:    vpermi2w %zmm8, %zmm9, %zmm10
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm8
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7,26,25,24,27,26,25,24,27,26,25,24,27,28,28,28,28]
 ; AVX512BW-FAST-NEXT:    movl $1227133513, %ecx # imm = 0x49249249
 ; AVX512BW-FAST-NEXT:    kmovd %ecx, %k2
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm10, %zmm7 {%k2}
+; AVX512BW-FAST-NEXT:    vpermw %zmm8, %zmm9, %zmm7 {%k2}
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm8 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm9 = ymm4[8],ymm5[8],ymm4[9],ymm5[9],ymm4[10],ymm5[10],ymm4[11],ymm5[11],ymm4[12],ymm5[12],ymm4[13],ymm5[13],ymm4[14],ymm5[14],ymm4[15],ymm5[15],ymm4[24],ymm5[24],ymm4[25],ymm5[25],ymm4[26],ymm5[26],ymm4[27],ymm5[27],ymm4[28],ymm5[28],ymm4[29],ymm5[29],ymm4[30],ymm5[30],ymm4[31],ymm5[31]
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
 ; AVX512BW-FAST-NEXT:    vpermw %ymm9, %ymm10, %ymm9
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
 ; AVX512BW-FAST-NEXT:    vpermw %ymm8, %ymm10, %ymm9 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %ymm8, %ymm5, %ymm5
-; AVX512BW-FAST-NEXT:    vpshufb %ymm8, %ymm4, %ymm4
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm8
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
+; AVX512BW-FAST-NEXT:    vpshufb %ymm9, %ymm5, %ymm5
+; AVX512BW-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm4
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[16],ymm5[16],ymm4[17],ymm5[17],ymm4[18],ymm5[18],ymm4[19],ymm5[19],ymm4[20],ymm5[20],ymm4[21],ymm5[21],ymm4[22],ymm5[22],ymm4[23],ymm5[23]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
@@ -2071,7 +2081,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
 ; AVX512BW-FAST-NEXT:    vmovdqu16 %ymm4, %ymm2 {%k1}
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm9[0,1,2,3]
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm8[4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u,6,5,8,7,u,9,u,u,u,u,u,u,u,u,u,u>
 ; AVX512BW-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm4
 ; AVX512BW-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm3
@@ -4103,12 +4113,12 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u>
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm10, %ymm15
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm15, %zmm25
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm15
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm15, %ymm11
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm15
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm15, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm1, %ymm11
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm11[0],ymm0[0],ymm11[1],ymm0[1],ymm11[2],ymm0[2],ymm11[3],ymm0[3],ymm11[4],ymm0[4],ymm11[5],ymm0[5],ymm11[6],ymm0[6],ymm11[7],ymm0[7],ymm11[16],ymm0[16],ymm11[17],ymm0[17],ymm11[18],ymm0[18],ymm11[19],ymm0[19],ymm11[20],ymm0[20],ymm11[21],ymm0[21],ymm11[22],ymm0[22],ymm11[23],ymm0[23]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm11 = ymm15[8],ymm1[8],ymm15[9],ymm1[9],ymm15[10],ymm1[10],ymm15[11],ymm1[11],ymm15[12],ymm1[12],ymm15[13],ymm1[13],ymm15[14],ymm1[14],ymm15[15],ymm1[15],ymm15[24],ymm1[24],ymm15[25],ymm1[25],ymm15[26],ymm1[26],ymm15[27],ymm1[27],ymm15[28],ymm1[28],ymm15[29],ymm1[29],ymm15[30],ymm1[30],ymm15[31],ymm1[31]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm11 = ymm1[8],ymm15[8],ymm1[9],ymm15[9],ymm1[10],ymm15[10],ymm1[11],ymm15[11],ymm1[12],ymm15[12],ymm1[13],ymm15[13],ymm1[14],ymm15[14],ymm1[15],ymm15[15],ymm1[24],ymm15[24],ymm1[25],ymm15[25],ymm1[26],ymm15[26],ymm1[27],ymm15[27],ymm1[28],ymm15[28],ymm1[29],ymm15[29],ymm1[30],ymm15[30],ymm1[31],ymm15[31]
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm11, %ymm11
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm24
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm0
@@ -4128,7 +4138,7 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm13
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm14
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm15[0],ymm1[0],ymm15[1],ymm1[1],ymm15[2],ymm1[2],ymm15[3],ymm1[3],ymm15[4],ymm1[4],ymm15[5],ymm1[5],ymm15[6],ymm1[6],ymm15[7],ymm1[7],ymm15[16],ymm1[16],ymm15[17],ymm1[17],ymm15[18],ymm1[18],ymm15[19],ymm1[19],ymm15[20],ymm1[20],ymm15[21],ymm1[21],ymm15[22],ymm1[22],ymm15[23],ymm1[23]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm15[0],ymm1[1],ymm15[1],ymm1[2],ymm15[2],ymm1[3],ymm15[3],ymm1[4],ymm15[4],ymm1[5],ymm15[5],ymm1[6],ymm15[6],ymm1[7],ymm15[7],ymm1[16],ymm15[16],ymm1[17],ymm15[17],ymm1[18],ymm15[18],ymm1[19],ymm15[19],ymm1[20],ymm15[20],ymm1[21],ymm15[21],ymm1[22],ymm15[22],ymm1[23],ymm15[23]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm18
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = [10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15]
@@ -4217,7 +4227,8 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm3, %ymm15, %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm8, %ymm3, %ymm9
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm9[0,1,2,3],zmm1[0,1,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm9[0,1,2,3],zmm1[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm7, %zmm7
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255]
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm8, %zmm7
@@ -4247,7 +4258,8 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm9, %ymm1
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm15, %ymm5
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm9, %ymm3, %ymm13
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm13[0,1,2,3],zmm5[0,1,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm13[0,1,2,3],zmm1[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm29, %zmm28, %zmm5
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm8, %zmm5
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm1 = zmm26[0,0,0,1,4,4,4,5]
@@ -4263,7 +4275,8 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm9, %ymm30
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm8, %ymm15, %ymm31
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm31[0,1,2,3],zmm30[0,1,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm31[0,1,2,3],zmm1[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpermq $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # zmm8 = mem[0,0,0,1,4,4,4,5]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
@@ -4271,7 +4284,8 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm12, %ymm15, %ymm11
 ; AVX512F-SLOW-NEXT:    vextracti64x4 $1, %zmm12, %ymm1
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm9, %ymm14
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm11[0,1,2,3],zmm14[0,1,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm11[0,1,2,3],zmm1[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpermq $64, (%rsp), %zmm11 # 64-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # zmm11 = mem[0,0,0,1,4,4,4,5]
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm13, %zmm11
@@ -4289,11 +4303,13 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm1, %ymm3, %ymm4
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm12, %ymm9, %ymm0
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %ymm13, %ymm9, %ymm2
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm6[0,1,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm1 = zmm25[2,2,2,3,6,6,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm1
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm4[0,1,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm22[2,2,2,3,6,6,6,7]
 ; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm2
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
@@ -4309,271 +4325,277 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512F-FAST-LABEL: store_i8_stride6_vf64:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    subq $280, %rsp # imm = 0x118
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm6
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm7
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15]
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
+; AVX512F-FAST-NEXT:    subq $200, %rsp
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm10
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm3
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm7
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %ymm12
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm7, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm8, %ymm1
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm1 = ymm8[8],ymm7[8],ymm8[9],ymm7[9],ymm8[10],ymm7[10],ymm8[11],ymm7[11],ymm8[12],ymm7[12],ymm8[13],ymm7[13],ymm8[14],ymm7[14],ymm8[15],ymm7[15],ymm8[24],ymm7[24],ymm8[25],ymm7[25],ymm8[26],ymm7[26],ymm8[27],ymm7[27],ymm8[28],ymm7[28],ymm8[29],ymm7[29],ymm8[30],ymm7[30],ymm8[31],ymm7[31]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm8, %ymm26
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm7, %ymm27
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31>
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm10
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm11
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[4],ymm10[4],ymm11[5],ymm10[5],ymm11[6],ymm10[6],ymm11[7],ymm10[7],ymm11[16],ymm10[16],ymm11[17],ymm10[17],ymm11[18],ymm10[18],ymm11[19],ymm10[19],ymm11[20],ymm10[20],ymm11[21],ymm10[21],ymm11[22],ymm10[22],ymm11[23],ymm10[23]
-; AVX512F-FAST-NEXT:    vprold $16, %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm24
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15]
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm2, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm6, %ymm1
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm1 = ymm6[8],ymm2[8],ymm6[9],ymm2[9],ymm6[10],ymm2[10],ymm6[11],ymm2[11],ymm6[12],ymm2[12],ymm6[13],ymm2[13],ymm6[14],ymm2[14],ymm6[15],ymm2[15],ymm6[24],ymm2[24],ymm6[25],ymm2[25],ymm6[26],ymm2[26],ymm6[27],ymm2[27],ymm6[28],ymm2[28],ymm6[29],ymm2[29],ymm6[30],ymm2[30],ymm6[31],ymm2[31]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm6, %ymm28
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm29
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31>
+; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm4
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm12
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm12[0],ymm4[0],ymm12[1],ymm4[1],ymm12[2],ymm4[2],ymm12[3],ymm4[3],ymm12[4],ymm4[4],ymm12[5],ymm4[5],ymm12[6],ymm4[6],ymm12[7],ymm4[7],ymm12[16],ymm4[16],ymm12[17],ymm4[17],ymm12[18],ymm4[18],ymm12[19],ymm4[19],ymm12[20],ymm4[20],ymm12[21],ymm4[21],ymm12[22],ymm4[22],ymm12[23],ymm4[23]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,20,21,18,19,24,25,26,27,28,29,26,27>
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm3[2,2,2,3]
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm14
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm14[2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u,18,u,17,u,16,u,19,u,u,u,u,u,20,u,u,u]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm3[2,2,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15>
+; AVX512F-FAST-NEXT:    vmovdqa %ymm12, %ymm6
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm12, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm12, %ymm12
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm6, %ymm30
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm12, %zmm2
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, (%rsp) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm15
+; AVX512F-FAST-NEXT:    vmovdqa %ymm3, %ymm6
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm3, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm15, %ymm5
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm5[0],ymm2[0],ymm5[1],ymm2[1],ymm5[2],ymm2[2],ymm5[3],ymm2[3],ymm5[4],ymm2[4],ymm5[5],ymm2[5],ymm5[6],ymm2[6],ymm5[7],ymm2[7],ymm5[16],ymm2[16],ymm5[17],ymm2[17],ymm5[18],ymm2[18],ymm5[19],ymm2[19],ymm5[20],ymm2[20],ymm5[21],ymm2[21],ymm5[22],ymm2[22],ymm5[23],ymm2[23]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm15[8],ymm3[8],ymm15[9],ymm3[9],ymm15[10],ymm3[10],ymm15[11],ymm3[11],ymm15[12],ymm3[12],ymm15[13],ymm3[13],ymm15[14],ymm3[14],ymm15[15],ymm3[15],ymm15[24],ymm3[24],ymm15[25],ymm3[25],ymm15[26],ymm3[26],ymm15[27],ymm3[27],ymm15[28],ymm3[28],ymm15[29],ymm3[29],ymm15[30],ymm3[30],ymm15[31],ymm3[31]
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm5, %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm7
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm2, %zmm2
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm8
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm10, %ymm9
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm9[0],ymm2[0],ymm9[1],ymm2[1],ymm9[2],ymm2[2],ymm9[3],ymm2[3],ymm9[4],ymm2[4],ymm9[5],ymm2[5],ymm9[6],ymm2[6],ymm9[7],ymm2[7],ymm9[16],ymm2[16],ymm9[17],ymm2[17],ymm9[18],ymm2[18],ymm9[19],ymm2[19],ymm9[20],ymm2[20],ymm9[21],ymm2[21],ymm9[22],ymm2[22],ymm9[23],ymm2[23]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm9 = ymm10[8],ymm4[8],ymm10[9],ymm4[9],ymm10[10],ymm4[10],ymm10[11],ymm4[11],ymm10[12],ymm4[12],ymm10[13],ymm4[13],ymm10[14],ymm4[14],ymm10[15],ymm4[15],ymm10[24],ymm4[24],ymm10[25],ymm4[25],ymm10[26],ymm4[26],ymm10[27],ymm4[27],ymm10[28],ymm4[28],ymm10[29],ymm4[29],ymm10[30],ymm4[30],ymm10[31],ymm4[31]
+; AVX512F-FAST-NEXT:    vmovdqa %ymm10, %ymm5
+; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm9, %ymm11
+; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm10
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm2, %zmm2
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm7, %xmm30
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm6, %xmm23
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm25
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm13
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm6
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm6[8],xmm13[8],xmm6[9],xmm13[9],xmm6[10],xmm13[10],xmm6[11],xmm13[11],xmm6[12],xmm13[12],xmm6[13],xmm13[13],xmm6[14],xmm13[14],xmm6[15],xmm13[15]
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm27
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm3[0],ymm1[0],ymm3[1],ymm1[1],ymm3[2],ymm1[2],ymm3[3],ymm1[3],ymm3[4],ymm1[4],ymm3[5],ymm1[5],ymm3[6],ymm1[6],ymm3[7],ymm1[7],ymm3[16],ymm1[16],ymm3[17],ymm1[17],ymm3[18],ymm1[18],ymm3[19],ymm1[19],ymm3[20],ymm1[20],ymm3[21],ymm1[21],ymm3[22],ymm1[22],ymm3[23],ymm1[23]
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm10, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm10, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm24
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm7, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm8, %xmm1
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,6,7,4,5,2,3,8,9,10,11,12,13,10,11]
+; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm25
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %xmm11
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9>
+; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,2,u,1,u,0,u,3,u,4,u,4,u,4,u,4>
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm11, %xmm1
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm23
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm12
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm12[8],xmm0[8],xmm12[9],xmm0[9],xmm12[10],xmm0[10],xmm12[11],xmm0[11],xmm12[12],xmm0[12],xmm12[13],xmm0[13],xmm12[14],xmm0[14],xmm12[15],xmm0[15]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm12, %xmm12
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm12, %zmm21
+; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm12
+; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm12, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm12, %xmm3
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm22
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm5[0],ymm4[0],ymm5[1],ymm4[1],ymm5[2],ymm4[2],ymm5[3],ymm4[3],ymm5[4],ymm4[4],ymm5[5],ymm4[5],ymm5[6],ymm4[6],ymm5[7],ymm4[7],ymm5[16],ymm4[16],ymm5[17],ymm4[17],ymm5[18],ymm4[18],ymm5[19],ymm4[19],ymm5[20],ymm4[20],ymm5[21],ymm4[21],ymm5[22],ymm4[22],ymm5[23],ymm4[23]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm20
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm14
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm4
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm15[0],ymm6[0],ymm15[1],ymm6[1],ymm15[2],ymm6[2],ymm15[3],ymm6[3],ymm15[4],ymm6[4],ymm15[5],ymm6[5],ymm15[6],ymm6[6],ymm15[7],ymm6[7],ymm15[16],ymm6[16],ymm15[17],ymm6[17],ymm15[18],ymm6[18],ymm15[19],ymm6[19],ymm15[20],ymm6[20],ymm15[21],ymm6[21],ymm15[22],ymm6[22],ymm15[23],ymm6[23]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = [10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15]
+; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm0, %xmm9
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm2
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm6, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm31
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm28, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm29, %ymm6
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm6[0],ymm0[1],ymm6[1],ymm0[2],ymm6[2],ymm0[3],ymm6[3],ymm0[4],ymm6[4],ymm0[5],ymm6[5],ymm0[6],ymm6[6],ymm0[7],ymm6[7],ymm0[16],ymm6[16],ymm0[17],ymm6[17],ymm0[18],ymm6[18],ymm0[19],ymm6[19],ymm0[20],ymm6[20],ymm0[21],ymm6[21],ymm0[22],ymm6[22],ymm0[23],ymm6[23]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm28
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm1, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm3, %ymm5
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[4],ymm0[4],ymm5[5],ymm0[5],ymm5[6],ymm0[6],ymm5[7],ymm0[7],ymm5[16],ymm0[16],ymm5[17],ymm0[17],ymm5[18],ymm0[18],ymm5[19],ymm0[19],ymm5[20],ymm0[20],ymm5[21],ymm0[21],ymm5[22],ymm0[22],ymm5[23],ymm0[23]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm1 = ymm3[8],ymm1[8],ymm3[9],ymm1[9],ymm3[10],ymm1[10],ymm3[11],ymm1[11],ymm3[12],ymm1[12],ymm3[13],ymm1[13],ymm3[14],ymm1[14],ymm3[15],ymm1[15],ymm3[24],ymm1[24],ymm3[25],ymm1[25],ymm3[26],ymm1[26],ymm3[27],ymm1[27],ymm3[28],ymm1[28],ymm3[29],ymm1[29],ymm3[30],ymm1[30],ymm3[31],ymm1[31]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31>
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm17
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [2,2,2,3,10,10,10,11]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm1, %zmm22, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm5
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm7
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm8
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm15 = ymm8[0],ymm7[0],ymm8[1],ymm7[1],ymm8[2],ymm7[2],ymm8[3],ymm7[3],ymm8[4],ymm7[4],ymm8[5],ymm7[5],ymm8[6],ymm7[6],ymm8[7],ymm7[7],ymm8[16],ymm7[16],ymm8[17],ymm7[17],ymm8[18],ymm7[18],ymm8[19],ymm7[19],ymm8[20],ymm7[20],ymm8[21],ymm7[21],ymm8[22],ymm7[22],ymm8[23],ymm7[23]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm5[0],ymm3[0],ymm5[1],ymm3[1],ymm5[2],ymm3[2],ymm5[3],ymm3[3],ymm5[4],ymm3[4],ymm5[5],ymm3[5],ymm5[6],ymm3[6],ymm5[7],ymm3[7],ymm5[16],ymm3[16],ymm5[17],ymm3[17],ymm5[18],ymm3[18],ymm5[19],ymm3[19],ymm5[20],ymm3[20],ymm5[21],ymm3[21],ymm5[22],ymm3[22],ymm5[23],ymm3[23]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm26
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm5[8],ymm3[8],ymm5[9],ymm3[9],ymm5[10],ymm3[10],ymm5[11],ymm3[11],ymm5[12],ymm3[12],ymm5[13],ymm3[13],ymm5[14],ymm3[14],ymm5[15],ymm3[15],ymm5[24],ymm3[24],ymm5[25],ymm3[25],ymm5[26],ymm3[26],ymm5[27],ymm3[27],ymm5[28],ymm3[28],ymm5[29],ymm3[29],ymm5[30],ymm3[30],ymm5[31],ymm3[31]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31>
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vpermt2q %zmm3, %zmm22, %zmm15
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm1, %zmm15
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, %zmm16
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15>
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm8
-; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm8
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm4, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm12, %ymm9
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm9[0],ymm0[0],ymm9[1],ymm0[1],ymm9[2],ymm0[2],ymm9[3],ymm0[3],ymm9[4],ymm0[4],ymm9[5],ymm0[5],ymm9[6],ymm0[6],ymm9[7],ymm0[7],ymm9[16],ymm0[16],ymm9[17],ymm0[17],ymm9[18],ymm0[18],ymm9[19],ymm0[19],ymm9[20],ymm0[20],ymm9[21],ymm0[21],ymm9[22],ymm0[22],ymm9[23],ymm0[23]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm12[8],ymm4[8],ymm12[9],ymm4[9],ymm12[10],ymm4[10],ymm12[11],ymm4[11],ymm12[12],ymm4[12],ymm12[13],ymm4[13],ymm12[14],ymm4[14],ymm12[15],ymm4[15],ymm12[24],ymm4[24],ymm12[25],ymm4[25],ymm12[26],ymm4[26],ymm12[27],ymm4[27],ymm12[28],ymm4[28],ymm12[29],ymm4[29],ymm12[30],ymm4[30],ymm12[31],ymm4[31]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm8
-; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm4, %ymm4
-; AVX512F-FAST-NEXT:    vpermt2q %zmm4, %zmm22, %zmm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm10, %ymm4
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm11, %ymm2
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm12 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm11[8],ymm10[8],ymm11[9],ymm10[9],ymm11[10],ymm10[10],ymm11[11],ymm10[11],ymm11[12],ymm10[12],ymm11[13],ymm10[13],ymm11[14],ymm10[14],ymm11[15],ymm10[15],ymm11[24],ymm10[24],ymm11[25],ymm10[25],ymm11[26],ymm10[26],ymm11[27],ymm10[27],ymm11[28],ymm10[28],ymm11[29],ymm10[29],ymm11[30],ymm10[30],ymm11[31],ymm10[31]
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
-; AVX512F-FAST-NEXT:    vpermt2q %zmm2, %zmm22, %zmm12
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm16, %zmm12
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15]
+; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm13
+; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm29
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm26, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm27, %ymm1
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,22,23,20,21,18,19,24,25,26,27,28,29,26,27>
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm5, %ymm8
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm6, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm18
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,10,u,13,u,12,u,11,u,14,u,13,u,14,u,15>
+; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm12, %xmm12
+; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm11, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm19
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u,2,u,1,u,0,u,3,u,u,u,u,u,4,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm10, %ymm10
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm30, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm30
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u,2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm7
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm0
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm0[2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u,18,u,17,u,16,u,19,u,u,u,u,u,20,u,u,u]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm29
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <10,u,13,u,12,u,11,u,14,u,u,u,u,u,15,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm21
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm14, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm31
-; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm14, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm20
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u,2,u,1,u,0,u,3,u,u,u,u,u,4,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm1, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
-; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm2, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm17
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm2, %ymm14
-; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm14
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm23, %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm30, %xmm4
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm2
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; AVX512F-FAST-NEXT:    vprold $16, %xmm2, %xmm23
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,0,0,1,8,8,8,9]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm23
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm13, %xmm2
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm6, %xmm4
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,6,7,4,5,2,3,8,9,10,11,12,13,10,11]
-; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm2, %xmm5
-; AVX512F-FAST-NEXT:    vpermt2q %zmm4, %zmm22, %zmm5
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %xmm13
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,6,u,5,u,8,u,7,u,9,u,9,u,9,u,9>
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm13, %xmm7
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm18
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <u,2,u,1,u,0,u,3,u,4,u,4,u,4,u,4>
-; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm13, %xmm4
-; AVX512F-FAST-NEXT:    vpermt2q %zmm7, %zmm22, %zmm4
-; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm7
-; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm3
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3],xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX512F-FAST-NEXT:    vprold $16, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vpermt2q %zmm3, %zmm22, %zmm7
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm24, %xmm2
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm3[8],xmm0[9],xmm3[9],xmm0[10],xmm3[10],xmm0[11],xmm3[11],xmm0[12],xmm3[12],xmm0[13],xmm3[13],xmm0[14],xmm3[14],xmm0[15],xmm3[15]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm6
-; AVX512F-FAST-NEXT:    vpermt2q %zmm0, %zmm22, %zmm6
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm11
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <10,u,13,u,12,u,11,u,14,u,u,u,u,u,15,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm15
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm15, %ymm17
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm15
+; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm6
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm16
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm3, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm2, %xmm1
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX512F-FAST-NEXT:    vprold $16, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm26
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm14, %xmm1
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm2
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
+; AVX512F-FAST-NEXT:    vprold $16, %xmm2, %xmm2
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm27
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm2
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <10,u,13,u,12,u,11,u,14,u,13,u,14,u,15,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm8
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm9
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm30
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <10,u,13,u,12,u,11,u,14,u,13,u,14,u,15,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm2, %xmm5
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <6,u,5,u,8,u,7,u,9,u,9,u,9,u,9,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm3, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <2,u,1,u,0,u,3,u,4,u,4,u,4,u,4,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm16
-; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm11
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,10,u,13,u,12,u,11,u,14,u,13,u,14,u,15>
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm13, %xmm13
-; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm0
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm18, %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm24 = ymm17[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm25[0,0,0,1]
-; AVX512F-FAST-NEXT:    vprold $16, %ymm26, %ymm26
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,0,1]
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm3
+; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm2, %xmm14
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm9[0,0,0,1]
+; AVX512F-FAST-NEXT:    vprold $16, %ymm20, %ymm9
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm2, %zmm2
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm13, %zmm8
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm9, %zmm8
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm8, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm2, %ymm13, %ymm7
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm8, %ymm2, %ymm5
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm7[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm12, %zmm7
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm8, %zmm7
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm31[0,0,0,1]
+; AVX512F-FAST-NEXT:    vprold $16, %ymm28, %ymm10
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm5, %zmm5
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm29[0,0,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm18[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm28 = ymm19[0,0,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm29 = ymm30[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm17[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm16[2,2,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,0,1]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpermt2q %zmm1, %zmm22, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm1, %zmm6
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm6, %ymm22
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm7, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm22, %ymm6, %ymm11
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm11[0,1,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm11, %zmm0
-; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm23, %zmm1, %zmm5
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm29[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm9[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm13[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm19[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm21[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm15, %ymm6, %ymm13
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm15, %ymm15
-; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm22 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm22 = mem[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm23 = ymm20[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm12, %ymm6, %ymm23
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm12, %ymm12
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm29 = ymm31[2,2,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm16[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm7, %ymm10
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm5, %ymm5
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm30[0,0,0,1]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm6, %ymm30
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm10[0,1,2,3],zmm30[0,1,2,3]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm11, %zmm4
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm4, 192(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm12, %ymm4, %ymm29
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm23[0,1,2,3],zmm29[0,1,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm6, %zmm14
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm14, 128(%rax)
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm15, %ymm4, %ymm22
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm13[0,1,2,3],zmm22[0,1,2,3]
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm6, %zmm10
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, 320(%rax)
-; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm5 = mem[2,2,2,3]
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm5
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm11, %zmm6
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm26[2,2,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm25, %zmm5
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm28, %zmm27, %zmm10
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm11, %zmm10
-; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm6, %ymm5
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm11 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm7, %ymm11
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm10, %zmm10
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm9, %zmm10
 ; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm10, %ymm5
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm7, %ymm0
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm6, %ymm4, %ymm8
-; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm10, %ymm4, %ymm1
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm8[0,1,2,3],zmm11[0,1,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm2, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm5, %zmm2
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[0,1,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm3, %zmm1
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm5, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 256(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, 64(%rax)
-; AVX512F-FAST-NEXT:    addq $280, %rsp # imm = 0x118
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm5, %ymm13, %ymm11
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm10, %ymm2, %ymm4
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm5
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm5[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm29, %zmm28, %zmm5
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm8, %zmm5
+; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm4 = mem[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm8 = mem[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm9, %zmm8
+; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm4 = mem[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm10 = mem[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm9, %zmm10
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm8, %ymm4
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm4, %ymm2, %ymm30
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm10, %ymm4
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm4, %ymm2, %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm8, %ymm2, %ymm15
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm0, %zmm4
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm15[0,1,2,3],zmm4[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermq $234, (%rsp), %zmm8 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm8 = mem[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm9, %zmm8
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm10, %ymm2, %ymm31
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm4
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm31[0,1,2,3],zmm4[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm6 = zmm24[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm9, %zmm6
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm4 = zmm26[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm9 = zmm25[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm10, %zmm9
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm4 = zmm27[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm11 = zmm21[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm10, %zmm11
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm9, %ymm4
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm4, %ymm2, %ymm0
+; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm11, %ymm4
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm4, %ymm2, %ymm14
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm9, %ymm13, %ymm3
+; AVX512F-FAST-NEXT:    vpternlogq $184, %ymm11, %ymm13, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm3[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm2 = zmm23[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,255]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm2
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm22[0,0,0,1,4,4,4,5]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm1
+; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, 192(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm6, 128(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, 320(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm5, 256(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, 64(%rax)
+; AVX512F-FAST-NEXT:    addq $200, %rsp
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -4584,24 +4606,25 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r9), %zmm12
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rsi), %ymm16
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm17
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm17[0],ymm16[0],ymm17[1],ymm16[1],ymm17[2],ymm16[2],ymm17[3],ymm16[3],ymm17[4],ymm16[4],ymm17[5],ymm16[5],ymm17[6],ymm16[6],ymm17[7],ymm16[7],ymm17[16],ymm16[16],ymm17[17],ymm16[17],ymm17[18],ymm16[18],ymm17[19],ymm16[19],ymm17[20],ymm16[20],ymm17[21],ymm16[21],ymm17[22],ymm16[22],ymm17[23],ymm16[23]
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm17[0],ymm16[0],ymm17[1],ymm16[1],ymm17[2],ymm16[2],ymm17[3],ymm16[3],ymm17[4],ymm16[4],ymm17[5],ymm16[5],ymm17[6],ymm16[6],ymm17[7],ymm16[7],ymm17[16],ymm16[16],ymm17[17],ymm16[17],ymm17[18],ymm16[18],ymm17[19],ymm16[19],ymm17[20],ymm16[20],ymm17[21],ymm16[21],ymm17[22],ymm16[22],ymm17[23],ymm16[23]
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%rsi), %xmm1
 ; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm9
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
 ; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm11
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,40,43,42,41,40,43,42,41,40,43,42,41,44,45,46,45]
-; AVX512BW-SLOW-NEXT:    vpermt2w %zmm2, %zmm18, %zmm0
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,24,27,26,25,24,27,26,25,24,27,26,25,28,29,30,29]
+; AVX512BW-SLOW-NEXT:    vpermw %zmm0, %zmm7, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%rcx), %xmm2
 ; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm8
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%rdx), %xmm4
 ; AVX512BW-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm10
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm5, %ymm23, %ymm5
-; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rcx), %ymm19
-; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdx), %ymm20
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm20[0],ymm19[0],ymm20[1],ymm19[1],ymm20[2],ymm19[2],ymm20[3],ymm19[3],ymm20[4],ymm19[4],ymm20[5],ymm19[5],ymm20[6],ymm19[6],ymm20[7],ymm19[7],ymm20[16],ymm19[16],ymm20[17],ymm19[17],ymm20[18],ymm19[18],ymm20[19],ymm19[19],ymm20[20],ymm19[20],ymm20[21],ymm19[21],ymm20[22],ymm19[22],ymm20[23],ymm19[23]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm5, %ymm20, %ymm5
+; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rcx), %ymm18
+; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdx), %ymm19
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm19[0],ymm18[0],ymm19[1],ymm18[1],ymm19[2],ymm18[2],ymm19[3],ymm18[3],ymm19[4],ymm18[4],ymm19[5],ymm18[5],ymm19[6],ymm18[6],ymm19[7],ymm18[7],ymm19[16],ymm18[16],ymm19[17],ymm18[17],ymm19[18],ymm18[18],ymm19[19],ymm18[19],ymm19[20],ymm18[20],ymm19[21],ymm18[21],ymm19[22],ymm18[22],ymm19[23],ymm18[23]
 ; AVX512BW-SLOW-NEXT:    vprold $16, %ymm6, %ymm6
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm5
@@ -4611,110 +4634,111 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%r8), %xmm5
 ; AVX512BW-SLOW-NEXT:    vmovdqa 32(%r8), %xmm13
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm6, %ymm24, %ymm6
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm6, %ymm23, %ymm6
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r8), %ymm21
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm25 = <2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u,2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm25, %ymm21, %ymm7
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm24 = <2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u,2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm24, %ymm21, %ymm15
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm6, %zmm6
 ; AVX512BW-SLOW-NEXT:    movl $1227133513, %r10d # imm = 0x49249249
 ; AVX512BW-SLOW-NEXT:    kmovd %r10d, %k2
 ; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm6, %zmm0 {%k2}
 ; AVX512BW-SLOW-NEXT:    vmovdqa (%r9), %xmm6
 ; AVX512BW-SLOW-NEXT:    vmovdqa 32(%r9), %xmm15
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm7, %ymm24, %ymm7
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm22 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm22, %ymm23, %ymm25
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r9), %ymm22
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm26 = <u,2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u,2,u,1,u,0,u,3,u,u,u,u,u,4,u,u>
 ; AVX512BW-SLOW-NEXT:    vpshufb %ymm26, %ymm22, %ymm27
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[2,2,2,3]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm27, %zmm7, %zmm7
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm27, %zmm25, %zmm25
 ; AVX512BW-SLOW-NEXT:    movabsq $2342443691899625602, %r10 # imm = 0x2082082082082082
 ; AVX512BW-SLOW-NEXT:    kmovq %r10, %k3
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm7, %zmm0 {%k3}
-; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rsi), %ymm27
-; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rdi), %ymm28
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm29 = ymm28[0],ymm27[0],ymm28[1],ymm27[1],ymm28[2],ymm27[2],ymm28[3],ymm27[3],ymm28[4],ymm27[4],ymm28[5],ymm27[5],ymm28[6],ymm27[6],ymm28[7],ymm27[7],ymm28[16],ymm27[16],ymm28[17],ymm27[17],ymm28[18],ymm27[18],ymm28[19],ymm27[19],ymm28[20],ymm27[20],ymm28[21],ymm27[21],ymm28[22],ymm27[22],ymm28[23],ymm27[23]
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm11[8],xmm9[8],xmm11[9],xmm9[9],xmm11[10],xmm9[10],xmm11[11],xmm9[11],xmm11[12],xmm9[12],xmm11[13],xmm9[13],xmm11[14],xmm9[14],xmm11[15],xmm9[15]
-; AVX512BW-SLOW-NEXT:    vpermt2w %zmm29, %zmm18, %zmm7
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm18 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm18, %ymm23, %ymm18
-; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rcx), %ymm23
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm25, %zmm0 {%k3}
+; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rsi), %ymm25
+; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rdi), %ymm27
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm28 = ymm27[0],ymm25[0],ymm27[1],ymm25[1],ymm27[2],ymm25[2],ymm27[3],ymm25[3],ymm27[4],ymm25[4],ymm27[5],ymm25[5],ymm27[6],ymm25[6],ymm27[7],ymm25[7],ymm27[16],ymm25[16],ymm27[17],ymm25[17],ymm27[18],ymm25[18],ymm27[19],ymm25[19],ymm27[20],ymm25[20],ymm27[21],ymm25[21],ymm27[22],ymm25[22],ymm27[23],ymm25[23]
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm29 = xmm11[8],xmm9[8],xmm11[9],xmm9[9],xmm11[10],xmm9[10],xmm11[11],xmm9[11],xmm11[12],xmm9[12],xmm11[13],xmm9[13],xmm11[14],xmm9[14],xmm11[15],xmm9[15]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm28, %zmm29, %zmm28
+; AVX512BW-SLOW-NEXT:    vpermw %zmm28, %zmm7, %zmm7
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm28 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm28, %ymm20, %ymm20
+; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rcx), %ymm28
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%rdx), %ymm29
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm30 = ymm29[0],ymm23[0],ymm29[1],ymm23[1],ymm29[2],ymm23[2],ymm29[3],ymm23[3],ymm29[4],ymm23[4],ymm29[5],ymm23[5],ymm29[6],ymm23[6],ymm29[7],ymm23[7],ymm29[16],ymm23[16],ymm29[17],ymm23[17],ymm29[18],ymm23[18],ymm29[19],ymm23[19],ymm29[20],ymm23[20],ymm29[21],ymm23[21],ymm29[22],ymm23[22],ymm29[23],ymm23[23]
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm30 = ymm29[0],ymm28[0],ymm29[1],ymm28[1],ymm29[2],ymm28[2],ymm29[3],ymm28[3],ymm29[4],ymm28[4],ymm29[5],ymm28[5],ymm29[6],ymm28[6],ymm29[7],ymm28[7],ymm29[16],ymm28[16],ymm29[17],ymm28[17],ymm29[18],ymm28[18],ymm29[19],ymm28[19],ymm29[20],ymm28[20],ymm29[21],ymm28[21],ymm29[22],ymm28[22],ymm29[23],ymm28[23]
 ; AVX512BW-SLOW-NEXT:    vprold $16, %ymm30, %ymm30
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm30[2,2,2,3]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm18, %zmm18
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm18, %zmm7 {%k1}
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm18 = xmm13[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm18, %ymm24, %ymm18
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm30, %zmm20, %zmm20
+; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm20, %zmm7 {%k1}
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm20 = xmm13[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm20, %ymm23, %ymm20
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%r8), %ymm30
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm25, %ymm30, %ymm25
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm25[2,2,2,3]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm25, %zmm18, %zmm18
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm18, %zmm7 {%k2}
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm18 = xmm15[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm18, %ymm24, %ymm18
-; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%r9), %ymm24
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm26, %ymm24, %ymm25
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm25[2,2,2,3]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm25, %zmm18, %zmm18
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm18, %zmm7 {%k3}
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm25 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm25, %ymm27, %ymm18
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm25, %ymm28, %ymm26
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm18 = ymm26[0],ymm18[0],ymm26[1],ymm18[1],ymm26[2],ymm18[2],ymm26[3],ymm18[3],ymm26[4],ymm18[4],ymm26[5],ymm18[5],ymm26[6],ymm18[6],ymm26[7],ymm18[7],ymm26[16],ymm18[16],ymm26[17],ymm18[17],ymm26[18],ymm18[18],ymm26[19],ymm18[19],ymm26[20],ymm18[20],ymm26[21],ymm18[21],ymm26[22],ymm18[22],ymm26[23],ymm18[23]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm18[2,2,2,3]
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm26 = ymm28[8],ymm27[8],ymm28[9],ymm27[9],ymm28[10],ymm27[10],ymm28[11],ymm27[11],ymm28[12],ymm27[12],ymm28[13],ymm27[13],ymm28[14],ymm27[14],ymm28[15],ymm27[15],ymm28[24],ymm27[24],ymm28[25],ymm27[25],ymm28[26],ymm27[26],ymm28[27],ymm27[27],ymm28[28],ymm27[28],ymm28[29],ymm27[29],ymm28[30],ymm27[30],ymm28[31],ymm27[31]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm26, %ymm27, %ymm26
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm26, %zmm18, %zmm26
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm28 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm28, %ymm23, %ymm18
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm28, %ymm29, %ymm31
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm18 = ymm31[0],ymm18[0],ymm31[1],ymm18[1],ymm31[2],ymm18[2],ymm31[3],ymm18[3],ymm31[4],ymm18[4],ymm31[5],ymm18[5],ymm31[6],ymm18[6],ymm31[7],ymm18[7],ymm31[16],ymm18[16],ymm31[17],ymm18[17],ymm31[18],ymm18[18],ymm31[19],ymm18[19],ymm31[20],ymm18[20],ymm31[21],ymm18[21],ymm31[22],ymm18[22],ymm31[23],ymm18[23]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm18[2,2,2,3]
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm23 = ymm29[8],ymm23[8],ymm29[9],ymm23[9],ymm29[10],ymm23[10],ymm29[11],ymm23[11],ymm29[12],ymm23[12],ymm29[13],ymm23[13],ymm29[14],ymm23[14],ymm29[15],ymm23[15],ymm29[24],ymm23[24],ymm29[25],ymm23[25],ymm29[26],ymm23[26],ymm29[27],ymm23[27],ymm29[28],ymm23[28],ymm29[29],ymm23[29],ymm29[30],ymm23[30],ymm29[31],ymm23[31]
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm24, %ymm30, %ymm24
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[2,2,2,3]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm20, %zmm20
+; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm20, %zmm7 {%k2}
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm20 = xmm15[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm20, %ymm23, %ymm20
+; AVX512BW-SLOW-NEXT:    vmovdqa64 32(%r9), %ymm23
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm26, %ymm23, %ymm24
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm24 = ymm24[2,2,2,3]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm20, %zmm20
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm20, %zmm7 {%k3}
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm24 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm24, %ymm25, %ymm20
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm24, %ymm27, %ymm26
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm20 = ymm26[0],ymm20[0],ymm26[1],ymm20[1],ymm26[2],ymm20[2],ymm26[3],ymm20[3],ymm26[4],ymm20[4],ymm26[5],ymm20[5],ymm26[6],ymm20[6],ymm26[7],ymm20[7],ymm26[16],ymm20[16],ymm26[17],ymm20[17],ymm26[18],ymm20[18],ymm26[19],ymm20[19],ymm26[20],ymm20[20],ymm26[21],ymm20[21],ymm26[22],ymm20[22],ymm26[23],ymm20[23]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[2,2,2,3]
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm25 = ymm27[8],ymm25[8],ymm27[9],ymm25[9],ymm27[10],ymm25[10],ymm27[11],ymm25[11],ymm27[12],ymm25[12],ymm27[13],ymm25[13],ymm27[14],ymm25[14],ymm27[15],ymm25[15],ymm27[24],ymm25[24],ymm27[25],ymm25[25],ymm27[26],ymm25[26],ymm27[27],ymm25[27],ymm27[28],ymm25[28],ymm27[29],ymm25[29],ymm27[30],ymm25[30],ymm27[31],ymm25[31]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm25, %ymm26, %ymm25
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm25, %zmm20, %zmm25
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm27 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm27, %ymm28, %ymm20
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm27, %ymm29, %ymm31
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm20 = ymm31[0],ymm20[0],ymm31[1],ymm20[1],ymm31[2],ymm20[2],ymm31[3],ymm20[3],ymm31[4],ymm20[4],ymm31[5],ymm20[5],ymm31[6],ymm20[6],ymm31[7],ymm20[7],ymm31[16],ymm20[16],ymm31[17],ymm20[17],ymm31[18],ymm20[18],ymm31[19],ymm20[19],ymm31[20],ymm20[20],ymm31[21],ymm20[21],ymm31[22],ymm20[22],ymm31[23],ymm20[23]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[2,2,2,3]
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm28 = ymm29[8],ymm28[8],ymm29[9],ymm28[9],ymm29[10],ymm28[10],ymm29[11],ymm28[11],ymm29[12],ymm28[12],ymm29[13],ymm28[13],ymm29[14],ymm28[14],ymm29[15],ymm28[15],ymm29[24],ymm28[24],ymm29[25],ymm28[25],ymm29[26],ymm28[26],ymm29[27],ymm28[27],ymm29[28],ymm28[28],ymm29[29],ymm28[29],ymm29[30],ymm28[30],ymm29[31],ymm28[31]
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm23, %ymm29, %ymm23
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm18, %zmm18
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm26, %zmm18 {%k1}
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm30[0,1,2,3],zmm14[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15,u>
-; AVX512BW-SLOW-NEXT:    vpshufb %zmm26, %zmm23, %zmm23
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm23 = zmm23[2,2,2,3,6,6,6,7]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm28, %ymm29, %ymm28
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm28, %zmm20, %zmm20
+; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm25, %zmm20 {%k1}
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm25 = zmm30[0,1,2,3],zmm14[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15,u>
+; AVX512BW-SLOW-NEXT:    vpshufb %zmm28, %zmm25, %zmm25
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm25 = zmm25[2,2,2,3,6,6,6,7]
 ; AVX512BW-SLOW-NEXT:    movl $-1840700270, %ecx # imm = 0x92492492
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm23, %zmm18 {%k2}
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm24[0,1,2,3],zmm12[4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm24 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15>
-; AVX512BW-SLOW-NEXT:    vpshufb %zmm24, %zmm23, %zmm23
+; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm25, %zmm20 {%k2}
+; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm23[0,1,2,3],zmm12[4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15>
+; AVX512BW-SLOW-NEXT:    vpshufb %zmm25, %zmm23, %zmm23
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm23 = zmm23[2,2,2,3,6,6,6,7]
 ; AVX512BW-SLOW-NEXT:    movabsq $-9076969306111049208, %rcx # imm = 0x8208208208208208
 ; AVX512BW-SLOW-NEXT:    kmovq %rcx, %k3
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm23, %zmm18 {%k3}
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm25, %ymm16, %ymm23
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm25, %ymm17, %ymm25
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm23 = ymm25[0],ymm23[0],ymm25[1],ymm23[1],ymm25[2],ymm23[2],ymm25[3],ymm23[3],ymm25[4],ymm23[4],ymm25[5],ymm23[5],ymm25[6],ymm23[6],ymm25[7],ymm23[7],ymm25[16],ymm23[16],ymm25[17],ymm23[17],ymm25[18],ymm23[18],ymm25[19],ymm23[19],ymm25[20],ymm23[20],ymm25[21],ymm23[21],ymm25[22],ymm23[22],ymm25[23],ymm23[23]
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm23, %zmm20 {%k3}
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm24, %ymm16, %ymm23
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm24, %ymm17, %ymm24
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm23 = ymm24[0],ymm23[0],ymm24[1],ymm23[1],ymm24[2],ymm23[2],ymm24[3],ymm23[3],ymm24[4],ymm23[4],ymm24[5],ymm23[5],ymm24[6],ymm23[6],ymm24[7],ymm23[7],ymm24[16],ymm23[16],ymm24[17],ymm23[17],ymm24[18],ymm23[18],ymm24[19],ymm23[19],ymm24[20],ymm23[20],ymm24[21],ymm23[21],ymm24[22],ymm23[22],ymm24[23],ymm23[23]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[2,2,2,3]
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm16 = ymm17[8],ymm16[8],ymm17[9],ymm16[9],ymm17[10],ymm16[10],ymm17[11],ymm16[11],ymm17[12],ymm16[12],ymm17[13],ymm16[13],ymm17[14],ymm16[14],ymm17[15],ymm16[15],ymm17[24],ymm16[24],ymm17[25],ymm16[25],ymm17[26],ymm16[26],ymm17[27],ymm16[27],ymm17[28],ymm16[28],ymm17[29],ymm16[29],ymm17[30],ymm16[30],ymm17[31],ymm16[31]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm16, %ymm27, %ymm16
+; AVX512BW-SLOW-NEXT:    vpermw %ymm16, %ymm26, %ymm16
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm23, %zmm17
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm28, %ymm19, %ymm16
-; AVX512BW-SLOW-NEXT:    vpshufb %ymm28, %ymm20, %ymm23
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm27, %ymm18, %ymm16
+; AVX512BW-SLOW-NEXT:    vpshufb %ymm27, %ymm19, %ymm23
 ; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm16 = ymm23[0],ymm16[0],ymm23[1],ymm16[1],ymm23[2],ymm16[2],ymm23[3],ymm16[3],ymm23[4],ymm16[4],ymm23[5],ymm16[5],ymm23[6],ymm16[6],ymm23[7],ymm16[7],ymm23[16],ymm16[16],ymm23[17],ymm16[17],ymm23[18],ymm16[18],ymm23[19],ymm16[19],ymm23[20],ymm16[20],ymm23[21],ymm16[21],ymm23[22],ymm16[22],ymm23[23],ymm16[23]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[2,2,2,3]
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm19 = ymm20[8],ymm19[8],ymm20[9],ymm19[9],ymm20[10],ymm19[10],ymm20[11],ymm19[11],ymm20[12],ymm19[12],ymm20[13],ymm19[13],ymm20[14],ymm19[14],ymm20[15],ymm19[15],ymm20[24],ymm19[24],ymm20[25],ymm19[25],ymm20[26],ymm19[26],ymm20[27],ymm19[27],ymm20[28],ymm19[28],ymm20[29],ymm19[29],ymm20[30],ymm19[30],ymm20[31],ymm19[31]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm19, %ymm29, %ymm19
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm16, %zmm16
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm18 = ymm19[8],ymm18[8],ymm19[9],ymm18[9],ymm19[10],ymm18[10],ymm19[11],ymm18[11],ymm19[12],ymm18[12],ymm19[13],ymm18[13],ymm19[14],ymm18[14],ymm19[15],ymm18[15],ymm19[24],ymm18[24],ymm19[25],ymm18[25],ymm19[26],ymm18[26],ymm19[27],ymm18[27],ymm19[28],ymm18[28],ymm19[29],ymm18[29],ymm19[30],ymm18[30],ymm19[31],ymm18[31]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm18, %ymm29, %ymm18
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm18, %zmm16, %zmm16
 ; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm17, %zmm16 {%k1}
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm21, %zmm14, %zmm14
-; AVX512BW-SLOW-NEXT:    vpshufb %zmm26, %zmm14, %zmm14
+; AVX512BW-SLOW-NEXT:    vpshufb %zmm28, %zmm14, %zmm14
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm14 = zmm14[2,2,2,3,6,6,6,7]
 ; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm14, %zmm16 {%k2}
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm22, %zmm12, %zmm12
-; AVX512BW-SLOW-NEXT:    vpshufb %zmm24, %zmm12, %zmm12
+; AVX512BW-SLOW-NEXT:    vpshufb %zmm25, %zmm12, %zmm12
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm12 = zmm12[2,2,2,3,6,6,6,7]
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm12, %zmm16 {%k3}
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u>
@@ -4728,8 +4752,8 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm9, %zmm9
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
 ; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm8, %xmm17
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm10, %xmm19
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm17 = xmm19[0],xmm17[0],xmm19[1],xmm17[1],xmm19[2],xmm17[2],xmm19[3],xmm17[3],xmm19[4],xmm17[4],xmm19[5],xmm17[5],xmm19[6],xmm17[6],xmm19[7],xmm17[7]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm14, %xmm10, %xmm18
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm17 = xmm18[0],xmm17[0],xmm18[1],xmm17[1],xmm18[2],xmm17[2],xmm18[3],xmm17[3],xmm18[4],xmm17[4],xmm18[5],xmm17[5],xmm18[6],xmm17[6],xmm18[7],xmm17[7]
 ; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3],xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
 ; AVX512BW-SLOW-NEXT:    vprold $16, %xmm8, %xmm8
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm17, %zmm8, %zmm8
@@ -4738,19 +4762,20 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vpmovzxbw {{.*#+}} xmm8 = xmm13[0],zero,xmm13[1],zero,xmm13[2],zero,xmm13[3],zero,xmm13[4],zero,xmm13[5],zero,xmm13[6],zero,xmm13[7],zero
 ; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm13[2,1,2,3]
 ; AVX512BW-SLOW-NEXT:    vpmovzxbw {{.*#+}} xmm10 = xmm10[0],zero,xmm10[1],zero,xmm10[2],zero,xmm10[3],zero,xmm10[4],zero,xmm10[5],zero,xmm10[6],zero,xmm10[7],zero
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,38,37,32,39,38,37,32,39,38,37,32,39,33,33,33,33]
-; AVX512BW-SLOW-NEXT:    vpermt2w %zmm10, %zmm13, %zmm8
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm8, %zmm9 {%k1}
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm15[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm15[2,1,2,3]
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm10[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-SLOW-NEXT:    vpermt2w %zmm10, %zmm13, %zmm8
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm10, %zmm8, %zmm8
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,22,21,16,23,22,21,16,23,22,21,16,23,17,17,17,17]
+; AVX512BW-SLOW-NEXT:    vpermw %zmm8, %zmm10, %zmm9 {%k1}
+; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm15[2,1,2,3]
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm8[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm15[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm8, %zmm13, %zmm8
+; AVX512BW-SLOW-NEXT:    vpermw %zmm8, %zmm10, %zmm8
 ; AVX512BW-SLOW-NEXT:    movabsq $585610922974906400, %rcx # imm = 0x820820820820820
 ; AVX512BW-SLOW-NEXT:    kmovq %rcx, %k3
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm8, %zmm9 {%k3}
 ; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm1, %xmm8
-; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm3, %xmm10
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
+; AVX512BW-SLOW-NEXT:    vpshufb %xmm12, %xmm3, %xmm12
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm12[8],xmm8[8],xmm12[9],xmm8[9],xmm12[10],xmm8[10],xmm12[11],xmm8[11],xmm12[12],xmm8[12],xmm12[13],xmm8[13],xmm12[14],xmm8[14],xmm12[15],xmm8[15]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,0,1]
 ; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
 ; AVX512BW-SLOW-NEXT:    vpermw %ymm1, %ymm11, %ymm1
@@ -4766,17 +4791,18 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
 ; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm5[2,1,2,3]
 ; AVX512BW-SLOW-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX512BW-SLOW-NEXT:    vpermt2w %zmm3, %zmm13, %zmm2
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %zmm2, %zmm1 {%k1}
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm6[2,1,2,3]
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-SLOW-NEXT:    vpermt2w %zmm3, %zmm13, %zmm2
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm2, %zmm2
+; AVX512BW-SLOW-NEXT:    vpermw %zmm2, %zmm10, %zmm1 {%k1}
+; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm6[2,1,2,3]
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm3, %zmm2
+; AVX512BW-SLOW-NEXT:    vpermw %zmm2, %zmm10, %zmm2
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm2, %zmm1 {%k3}
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm9, 192(%rax)
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm16, 128(%rax)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm18, 320(%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm20, 320(%rax)
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm7, 256(%rax)
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, 64(%rax)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
@@ -4784,203 +4810,203 @@ define void @store_i8_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512BW-FAST-LABEL: store_i8_stride6_vf64:
 ; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%r8), %zmm11
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%r9), %zmm12
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%r8), %zmm9
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%r9), %zmm10
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%rsi), %ymm1
-; AVX512BW-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,u>
 ; AVX512BW-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm0
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %ymm20
-; AVX512BW-FAST-NEXT:    vpshufb %ymm7, %ymm20, %ymm3
+; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdi), %ymm2
+; AVX512BW-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm3
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm20[8],ymm1[8],ymm20[9],ymm1[9],ymm20[10],ymm1[10],ymm20[11],ymm1[11],ymm20[12],ymm1[12],ymm20[13],ymm1[13],ymm20[14],ymm1[14],ymm20[15],ymm1[15],ymm20[24],ymm1[24],ymm20[25],ymm1[25],ymm20[26],ymm1[26],ymm20[27],ymm1[27],ymm20[28],ymm1[28],ymm20[29],ymm1[29],ymm20[30],ymm1[30],ymm20[31],ymm1[31]
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
-; AVX512BW-FAST-NEXT:    vpermw %ymm3, %ymm10, %ymm3
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15],ymm2[24],ymm1[24],ymm2[25],ymm1[25],ymm2[26],ymm1[26],ymm2[27],ymm1[27],ymm2[28],ymm1[28],ymm2[29],ymm1[29],ymm2[30],ymm1[30],ymm2[31],ymm1[31]
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [12,11,14,13,12,11,14,13,12,11,14,13,15,15,15,15]
+; AVX512BW-FAST-NEXT:    vpermw %ymm3, %ymm8, %ymm3
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%rcx), %ymm5
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %ymm13, %ymm5, %ymm0
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u,5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
+; AVX512BW-FAST-NEXT:    vpshufb %ymm12, %ymm5, %ymm0
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdx), %ymm6
-; AVX512BW-FAST-NEXT:    vpshufb %ymm13, %ymm6, %ymm4
+; AVX512BW-FAST-NEXT:    vpshufb %ymm12, %ymm6, %ymm4
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[16],ymm0[16],ymm4[17],ymm0[17],ymm4[18],ymm0[18],ymm4[19],ymm0[19],ymm4[20],ymm0[20],ymm4[21],ymm0[21],ymm4[22],ymm0[22],ymm4[23],ymm0[23]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm6[8],ymm5[8],ymm6[9],ymm5[9],ymm6[10],ymm5[10],ymm6[11],ymm5[11],ymm6[12],ymm5[12],ymm6[13],ymm5[13],ymm6[14],ymm5[14],ymm6[15],ymm5[15],ymm6[24],ymm5[24],ymm6[25],ymm5[25],ymm6[26],ymm5[26],ymm6[27],ymm5[27],ymm6[28],ymm5[28],ymm6[29],ymm5[29],ymm6[30],ymm5[30],ymm6[31],ymm5[31]
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm14 = [13,12,11,14,13,12,11,14,13,12,11,14,15,15,15,15]
 ; AVX512BW-FAST-NEXT:    vpermw %ymm4, %ymm14, %ymm4
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm1
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    movl $613566756, %eax # imm = 0x24924924
 ; AVX512BW-FAST-NEXT:    kmovd %eax, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm3, %zmm1 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%r8), %ymm0
-; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm0[0,1,2,3],zmm11[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm3, %zmm0 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa 32(%r8), %ymm3
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm3[0,1,2,3],zmm9[4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15,u>
 ; AVX512BW-FAST-NEXT:    vpshufb %zmm15, %zmm4, %zmm4
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm4 = zmm4[2,2,2,3,6,6,6,7]
 ; AVX512BW-FAST-NEXT:    movl $-1840700270, %eax # imm = 0x92492492
 ; AVX512BW-FAST-NEXT:    kmovd %eax, %k2
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm4, %zmm1 {%k2}
+; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm4, %zmm0 {%k2}
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%r9), %ymm4
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm4[0,1,2,3],zmm12[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm11 = zmm4[0,1,2,3],zmm10[4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,u,5,u,8,u,7,u,u,u,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,10,u,13,u,12,u,11,u,14,u,u,u,u,u,15>
-; AVX512BW-FAST-NEXT:    vpshufb %zmm16, %zmm8, %zmm8
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm8 = zmm8[2,2,2,3,6,6,6,7]
+; AVX512BW-FAST-NEXT:    vpshufb %zmm16, %zmm11, %zmm11
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm11 = zmm11[2,2,2,3,6,6,6,7]
 ; AVX512BW-FAST-NEXT:    movabsq $-9076969306111049208, %rax # imm = 0x8208208208208208
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm8, %zmm1 {%k3}
-; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %ymm8
-; AVX512BW-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm17
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %ymm9
-; AVX512BW-FAST-NEXT:    vpshufb %ymm7, %ymm9, %ymm7
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm11, %zmm0 {%k3}
+; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %ymm11
+; AVX512BW-FAST-NEXT:    vpshufb %ymm7, %ymm11, %ymm17
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %ymm13
+; AVX512BW-FAST-NEXT:    vpshufb %ymm7, %ymm13, %ymm7
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm7 = ymm7[0],ymm17[0],ymm7[1],ymm17[1],ymm7[2],ymm17[2],ymm7[3],ymm17[3],ymm7[4],ymm17[4],ymm7[5],ymm17[5],ymm7[6],ymm17[6],ymm7[7],ymm17[7],ymm7[16],ymm17[16],ymm7[17],ymm17[17],ymm7[18],ymm17[18],ymm7[19],ymm17[19],ymm7[20],ymm17[20],ymm7[21],ymm17[21],ymm7[22],ymm17[22],ymm7[23],ymm17[23]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm17 = ymm9[8],ymm8[8],ymm9[9],ymm8[9],ymm9[10],ymm8[10],ymm9[11],ymm8[11],ymm9[12],ymm8[12],ymm9[13],ymm8[13],ymm9[14],ymm8[14],ymm9[15],ymm8[15],ymm9[24],ymm8[24],ymm9[25],ymm8[25],ymm9[26],ymm8[26],ymm9[27],ymm8[27],ymm9[28],ymm8[28],ymm9[29],ymm8[29],ymm9[30],ymm8[30],ymm9[31],ymm8[31]
-; AVX512BW-FAST-NEXT:    vpermw %ymm17, %ymm10, %ymm10
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm7, %zmm10
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %ymm23
-; AVX512BW-FAST-NEXT:    vpshufb %ymm13, %ymm23, %ymm7
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %ymm24
-; AVX512BW-FAST-NEXT:    vpshufb %ymm13, %ymm24, %ymm13
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm7 = ymm13[0],ymm7[0],ymm13[1],ymm7[1],ymm13[2],ymm7[2],ymm13[3],ymm7[3],ymm13[4],ymm7[4],ymm13[5],ymm7[5],ymm13[6],ymm7[6],ymm13[7],ymm7[7],ymm13[16],ymm7[16],ymm13[17],ymm7[17],ymm13[18],ymm7[18],ymm13[19],ymm7[19],ymm13[20],ymm7[20],ymm13[21],ymm7[21],ymm13[22],ymm7[22],ymm13[23],ymm7[23]
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm17 = ymm13[8],ymm11[8],ymm13[9],ymm11[9],ymm13[10],ymm11[10],ymm13[11],ymm11[11],ymm13[12],ymm11[12],ymm13[13],ymm11[13],ymm13[14],ymm11[14],ymm13[15],ymm11[15],ymm13[24],ymm11[24],ymm13[25],ymm11[25],ymm13[26],ymm11[26],ymm13[27],ymm11[27],ymm13[28],ymm11[28],ymm13[29],ymm11[29],ymm13[30],ymm11[30],ymm13[31],ymm11[31]
+; AVX512BW-FAST-NEXT:    vpermw %ymm17, %ymm8, %ymm8
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm8
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %ymm17
+; AVX512BW-FAST-NEXT:    vpshufb %ymm12, %ymm17, %ymm7
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %ymm18
+; AVX512BW-FAST-NEXT:    vpshufb %ymm12, %ymm18, %ymm12
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm7 = ymm12[0],ymm7[0],ymm12[1],ymm7[1],ymm12[2],ymm7[2],ymm12[3],ymm7[3],ymm12[4],ymm7[4],ymm12[5],ymm7[5],ymm12[6],ymm7[6],ymm12[7],ymm7[7],ymm12[16],ymm7[16],ymm12[17],ymm7[17],ymm12[18],ymm7[18],ymm12[19],ymm7[19],ymm12[20],ymm7[20],ymm12[21],ymm7[21],ymm12[22],ymm7[22],ymm12[23],ymm7[23]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm13 = ymm24[8],ymm23[8],ymm24[9],ymm23[9],ymm24[10],ymm23[10],ymm24[11],ymm23[11],ymm24[12],ymm23[12],ymm24[13],ymm23[13],ymm24[14],ymm23[14],ymm24[15],ymm23[15],ymm24[24],ymm23[24],ymm24[25],ymm23[25],ymm24[26],ymm23[26],ymm24[27],ymm23[27],ymm24[28],ymm23[28],ymm24[29],ymm23[29],ymm24[30],ymm23[30],ymm24[31],ymm23[31]
-; AVX512BW-FAST-NEXT:    vpermw %ymm13, %ymm14, %ymm13
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm7, %zmm7
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm10, %zmm7 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %ymm10
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm11
-; AVX512BW-FAST-NEXT:    vpshufb %zmm15, %zmm11, %zmm11
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm11 = zmm11[2,2,2,3,6,6,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm11, %zmm7 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %ymm11
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm12, %zmm12
-; AVX512BW-FAST-NEXT:    vpshufb %zmm16, %zmm12, %zmm12
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm12 = zmm12[2,2,2,3,6,6,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm12, %zmm7 {%k3}
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rcx), %xmm15
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm15, %xmm13
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdx), %xmm17
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm17, %xmm14
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,0,1]
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm17[0],xmm15[0],xmm17[1],xmm15[1],xmm17[2],xmm15[2],xmm17[3],xmm15[3],xmm17[4],xmm15[4],xmm17[5],xmm15[5],xmm17[6],xmm15[6],xmm17[7],xmm15[7]
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
-; AVX512BW-FAST-NEXT:    vpermw %ymm14, %ymm16, %ymm14
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm14, %zmm3
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rsi), %xmm13
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm18, %xmm13, %xmm27
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%r8), %xmm21
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%r8), %xmm14
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm28 = <8,u,9,u,u,u,u,u,u,u,5,u,6,u,7,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm28, %xmm14, %xmm19
-; AVX512BW-FAST-NEXT:    vpmovzxbw {{.*#+}} xmm25 = xmm14[0],zero,xmm14[1],zero,xmm14[2],zero,xmm14[3],zero,xmm14[4],zero,xmm14[5],zero,xmm14[6],zero,xmm14[7],zero
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,38,37,32,39,38,37,32,39,38,37,32,39,33,33,33,33]
-; AVX512BW-FAST-NEXT:    vpermt2w %zmm19, %zmm29, %zmm25
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%r9), %xmm22
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%r9), %xmm19
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm30 = <u,8,u,9,u,u,u,u,u,u,u,5,u,6,u,7>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm30, %xmm19, %xmm31
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm26 = xmm19[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-FAST-NEXT:    vpermt2w %zmm31, %zmm29, %zmm26
-; AVX512BW-FAST-NEXT:    vpshufb %xmm28, %xmm21, %xmm31
-; AVX512BW-FAST-NEXT:    vpmovzxbw {{.*#+}} xmm28 = xmm21[0],zero,xmm21[1],zero,xmm21[2],zero,xmm21[3],zero,xmm21[4],zero,xmm21[5],zero,xmm21[6],zero,xmm21[7],zero
-; AVX512BW-FAST-NEXT:    vpermt2w %zmm31, %zmm29, %zmm28
-; AVX512BW-FAST-NEXT:    vpshufb %xmm30, %xmm22, %xmm31
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm30 = xmm22[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512BW-FAST-NEXT:    vpermt2w %zmm31, %zmm29, %zmm30
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %xmm29
-; AVX512BW-FAST-NEXT:    vpshufb %xmm18, %xmm29, %xmm31
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm27 = xmm31[8],xmm27[8],xmm31[9],xmm27[9],xmm31[10],xmm27[10],xmm31[11],xmm27[11],xmm31[12],xmm27[12],xmm31[13],xmm27[13],xmm31[14],xmm27[14],xmm31[15],xmm27[15]
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %xmm31
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm23 = ymm24[0],ymm23[0],ymm24[1],ymm23[1],ymm24[2],ymm23[2],ymm24[3],ymm23[3],ymm24[4],ymm23[4],ymm24[5],ymm23[5],ymm24[6],ymm23[6],ymm24[7],ymm23[7],ymm24[16],ymm23[16],ymm24[17],ymm23[17],ymm24[18],ymm23[18],ymm24[19],ymm23[19],ymm24[20],ymm23[20],ymm24[21],ymm23[21],ymm24[22],ymm23[22],ymm24[23],ymm23[23]
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %xmm24
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm24[8],xmm31[8],xmm24[9],xmm31[9],xmm24[10],xmm31[10],xmm24[11],xmm31[11],xmm24[12],xmm31[12],xmm24[13],xmm31[13],xmm24[14],xmm31[14],xmm24[15],xmm31[15]
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,41,40,43,42,41,40,43,42,41,40,43,42,45,44,47,46]
-; AVX512BW-FAST-NEXT:    vpermt2w %zmm23, %zmm2, %zmm0
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[1],ymm5[1],ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[4],ymm5[4],ymm6[5],ymm5[5],ymm6[6],ymm5[6],ymm6[7],ymm5[7],ymm6[16],ymm5[16],ymm6[17],ymm5[17],ymm6[18],ymm5[18],ymm6[19],ymm5[19],ymm6[20],ymm5[20],ymm6[21],ymm5[21],ymm6[22],ymm5[22],ymm6[23],ymm5[23]
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm17[8],xmm15[8],xmm17[9],xmm15[9],xmm17[10],xmm15[10],xmm17[11],xmm15[11],xmm17[12],xmm15[12],xmm17[13],xmm15[13],xmm17[14],xmm15[14],xmm17[15],xmm15[15]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm27[0,0,0,1]
-; AVX512BW-FAST-NEXT:    vpermt2w %zmm5, %zmm2, %zmm6
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm29[0],xmm13[0],xmm29[1],xmm13[1],xmm29[2],xmm13[2],xmm29[3],xmm13[3],xmm29[4],xmm13[4],xmm29[5],xmm13[5],xmm29[6],xmm13[6],xmm29[7],xmm13[7]
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
-; AVX512BW-FAST-NEXT:    vpermw %ymm2, %ymm5, %ymm2
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm2, %zmm2
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm3, %zmm2 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm25, %zmm2 {%k1}
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm12 = ymm18[8],ymm17[8],ymm18[9],ymm17[9],ymm18[10],ymm17[10],ymm18[11],ymm17[11],ymm18[12],ymm17[12],ymm18[13],ymm17[13],ymm18[14],ymm17[14],ymm18[15],ymm17[15],ymm18[24],ymm17[24],ymm18[25],ymm17[25],ymm18[26],ymm17[26],ymm18[27],ymm17[27],ymm18[28],ymm17[28],ymm18[29],ymm17[29],ymm18[30],ymm17[30],ymm18[31],ymm17[31]
+; AVX512BW-FAST-NEXT:    vpermw %ymm12, %ymm14, %ymm12
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm7, %zmm7
+; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm8, %zmm7 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %ymm8
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm9
+; AVX512BW-FAST-NEXT:    vpshufb %zmm15, %zmm9, %zmm9
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm9 = zmm9[2,2,2,3,6,6,6,7]
+; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm9, %zmm7 {%k2}
+; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %ymm9
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm10, %zmm10
+; AVX512BW-FAST-NEXT:    vpshufb %zmm16, %zmm10, %zmm10
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm10 = zmm10[2,2,2,3,6,6,6,7]
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm10, %zmm7 {%k3}
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %xmm21
+; AVX512BW-FAST-NEXT:    vmovdqa 32(%rcx), %xmm12
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm23 = <5,8,7,6,9,u,u,10,u,u,u,u,u,u,u,u>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm23, %xmm12, %xmm10
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %xmm22
+; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdx), %xmm14
+; AVX512BW-FAST-NEXT:    vpshufb %xmm23, %xmm14, %xmm15
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm15[0],xmm10[0],xmm15[1],xmm10[1],xmm15[2],xmm10[2],xmm15[3],xmm10[3],xmm15[4],xmm10[4],xmm15[5],xmm10[5],xmm15[6],xmm10[6],xmm15[7],xmm10[7]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm14[0],xmm12[0],xmm14[1],xmm12[1],xmm14[2],xmm12[2],xmm14[3],xmm12[3],xmm14[4],xmm12[4],xmm14[5],xmm12[5],xmm14[6],xmm12[6],xmm14[7],xmm12[7]
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [1,0,3,2,1,0,3,2,1,0,3,2,5,4,7,6]
+; AVX512BW-FAST-NEXT:    vpermw %ymm15, %ymm24, %ymm15
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm15, %zmm16
+; AVX512BW-FAST-NEXT:    vmovdqa 32(%rsi), %xmm15
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm25 = <u,u,u,u,u,u,u,u,8,7,6,9,u,u,10,u>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm25, %xmm15, %xmm10
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %xmm19
+; AVX512BW-FAST-NEXT:    vpshufb %xmm25, %xmm19, %xmm20
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm20[8],xmm10[8],xmm20[9],xmm10[9],xmm20[10],xmm10[10],xmm20[11],xmm10[11],xmm20[12],xmm10[12],xmm20[13],xmm10[13],xmm20[14],xmm10[14],xmm20[15],xmm10[15]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,0,1]
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm20 = xmm19[0],xmm15[0],xmm19[1],xmm15[1],xmm19[2],xmm15[2],xmm19[3],xmm15[3],xmm19[4],xmm15[4],xmm19[5],xmm15[5],xmm19[6],xmm15[6],xmm19[7],xmm15[7]
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [0,3,2,1,0,3,2,1,0,3,2,1,4,5,6,5]
+; AVX512BW-FAST-NEXT:    vpermw %ymm20, %ymm26, %ymm20
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm20, %zmm10
+; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm16, %zmm10 {%k2}
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%r8), %xmm16
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm27 = <8,u,9,u,u,u,u,u,u,u,5,u,6,u,7,u>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm27, %xmm16, %xmm20
+; AVX512BW-FAST-NEXT:    vpmovzxbw {{.*#+}} xmm28 = xmm16[0],zero,xmm16[1],zero,xmm16[2],zero,xmm16[3],zero,xmm16[4],zero,xmm16[5],zero,xmm16[6],zero,xmm16[7],zero
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm28, %zmm20
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [2,1,0,3,2,1,0,3,2,1,0,3,4,4,4,4,22,21,16,23,22,21,16,23,22,21,16,23,17,17,17,17]
+; AVX512BW-FAST-NEXT:    vpermw %zmm20, %zmm28, %zmm10 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%r9), %xmm20
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm29 = <u,8,u,9,u,10,u,11,u,4,u,5,u,6,u,7>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm29, %xmm20, %xmm30
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm31 = xmm20[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm30, %zmm31, %zmm30
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rsi), %xmm31
+; AVX512BW-FAST-NEXT:    vpermw %zmm30, %zmm28, %zmm30
 ; AVX512BW-FAST-NEXT:    movabsq $585610922974906400, %rax # imm = 0x820820820820820
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm26, %zmm2 {%k3}
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm31, %xmm3
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm24, %xmm12
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm12[0],xmm3[0],xmm12[1],xmm3[1],xmm12[2],xmm3[2],xmm12[3],xmm3[3],xmm12[4],xmm3[4],xmm12[5],xmm3[5],xmm12[6],xmm3[6],xmm12[7],xmm3[7]
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm24[0],xmm31[0],xmm24[1],xmm31[1],xmm24[2],xmm31[2],xmm24[3],xmm31[3],xmm24[4],xmm31[4],xmm24[5],xmm31[5],xmm24[6],xmm31[6],xmm24[7],xmm31[7]
-; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %xmm15
-; AVX512BW-FAST-NEXT:    vpermw %ymm12, %ymm16, %ymm12
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %xmm16
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm12, %zmm3
-; AVX512BW-FAST-NEXT:    vpshufb %xmm18, %xmm15, %xmm12
-; AVX512BW-FAST-NEXT:    vpshufb %xmm18, %xmm16, %xmm17
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm12 = xmm17[8],xmm12[8],xmm17[9],xmm12[9],xmm17[10],xmm12[10],xmm17[11],xmm12[11],xmm17[12],xmm12[12],xmm17[13],xmm12[13],xmm17[14],xmm12[14],xmm17[15],xmm12[15]
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm17 = xmm16[0],xmm15[0],xmm16[1],xmm15[1],xmm16[2],xmm15[2],xmm16[3],xmm15[3],xmm16[4],xmm15[4],xmm16[5],xmm15[5],xmm16[6],xmm15[6],xmm16[7],xmm15[7]
-; AVX512BW-FAST-NEXT:    vpermw %ymm17, %ymm5, %ymm5
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,0,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm5, %zmm5
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm3, %zmm5 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm28, %zmm5 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm30, %zmm5 {%k3}
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[4],ymm8[4],ymm9[5],ymm8[5],ymm9[6],ymm8[6],ymm9[7],ymm8[7],ymm9[16],ymm8[16],ymm9[17],ymm8[17],ymm9[18],ymm8[18],ymm9[19],ymm8[19],ymm9[20],ymm8[20],ymm9[21],ymm8[21],ymm9[22],ymm8[22],ymm9[23],ymm8[23]
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm16[8],xmm15[8],xmm16[9],xmm15[9],xmm16[10],xmm15[10],xmm16[11],xmm15[11],xmm16[12],xmm15[12],xmm16[13],xmm15[13],xmm16[14],xmm15[14],xmm16[15],xmm15[15]
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,40,43,42,41,40,43,42,41,40,43,42,41,44,45,46,45]
-; AVX512BW-FAST-NEXT:    vpermt2w %zmm3, %zmm9, %zmm8
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm0, %zmm8 {%k1}
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %ymm20, %ymm0 # 32-byte Folded Reload
-; AVX512BW-FAST-NEXT:    # ymm0 = ymm20[0],mem[0],ymm20[1],mem[1],ymm20[2],mem[2],ymm20[3],mem[3],ymm20[4],mem[4],ymm20[5],mem[5],ymm20[6],mem[6],ymm20[7],mem[7],ymm20[16],mem[16],ymm20[17],mem[17],ymm20[18],mem[18],ymm20[19],mem[19],ymm20[20],mem[20],ymm20[21],mem[21],ymm20[22],mem[22],ymm20[23],mem[23]
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm29[8],xmm13[8],xmm29[9],xmm13[9],xmm29[10],xmm13[10],xmm29[11],xmm13[11],xmm29[12],xmm13[12],xmm29[13],xmm13[13],xmm29[14],xmm13[14],xmm29[15],xmm13[15]
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm12 = xmm21[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-FAST-NEXT:    vpermt2w %zmm0, %zmm9, %zmm3
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7]
-; AVX512BW-FAST-NEXT:    vpermw %ymm12, %ymm0, %ymm9
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u,2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %ymm12, %ymm10, %ymm10
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,2,2,3]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm30, %zmm10 {%k3}
+; AVX512BW-FAST-NEXT:    vpshufb %xmm23, %xmm21, %xmm30
+; AVX512BW-FAST-NEXT:    vpshufb %xmm23, %xmm22, %xmm23
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm23 = xmm23[0],xmm30[0],xmm23[1],xmm30[1],xmm23[2],xmm30[2],xmm23[3],xmm30[3],xmm23[4],xmm30[4],xmm23[5],xmm30[5],xmm23[6],xmm30[6],xmm23[7],xmm30[7]
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm30 = xmm22[0],xmm21[0],xmm22[1],xmm21[1],xmm22[2],xmm21[2],xmm22[3],xmm21[3],xmm22[4],xmm21[4],xmm22[5],xmm21[5],xmm22[6],xmm21[6],xmm22[7],xmm21[7]
+; AVX512BW-FAST-NEXT:    vpermw %ymm30, %ymm24, %ymm24
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %xmm30
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[0,0,0,1]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm23, %zmm24, %zmm24
+; AVX512BW-FAST-NEXT:    vpshufb %xmm25, %xmm31, %xmm23
+; AVX512BW-FAST-NEXT:    vpshufb %xmm25, %xmm30, %xmm25
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm23 = xmm25[8],xmm23[8],xmm25[9],xmm23[9],xmm25[10],xmm23[10],xmm25[11],xmm23[11],xmm25[12],xmm23[12],xmm25[13],xmm23[13],xmm25[14],xmm23[14],xmm25[15],xmm23[15]
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm25 = xmm30[0],xmm31[0],xmm30[1],xmm31[1],xmm30[2],xmm31[2],xmm30[3],xmm31[3],xmm30[4],xmm31[4],xmm30[5],xmm31[5],xmm30[6],xmm31[6],xmm30[7],xmm31[7]
+; AVX512BW-FAST-NEXT:    vpermw %ymm25, %ymm26, %ymm25
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[0,0,0,1]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm23, %zmm25, %zmm23
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%r8), %xmm25
+; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm24, %zmm23 {%k2}
+; AVX512BW-FAST-NEXT:    vpshufb %xmm27, %xmm25, %xmm24
+; AVX512BW-FAST-NEXT:    vpmovzxbw {{.*#+}} xmm26 = xmm25[0],zero,xmm25[1],zero,xmm25[2],zero,xmm25[3],zero,xmm25[4],zero,xmm25[5],zero,xmm25[6],zero,xmm25[7],zero
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm26, %zmm24
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%r9), %xmm26
+; AVX512BW-FAST-NEXT:    vpermw %zmm24, %zmm28, %zmm23 {%k1}
+; AVX512BW-FAST-NEXT:    vpshufb %xmm29, %xmm26, %xmm24
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm27 = xmm26[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm24, %zmm27, %zmm24
+; AVX512BW-FAST-NEXT:    vpermw %zmm24, %zmm28, %zmm24
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm24, %zmm23 {%k3}
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm17 = ymm18[0],ymm17[0],ymm18[1],ymm17[1],ymm18[2],ymm17[2],ymm18[3],ymm17[3],ymm18[4],ymm17[4],ymm18[5],ymm17[5],ymm18[6],ymm17[6],ymm18[7],ymm17[7],ymm18[16],ymm17[16],ymm18[17],ymm17[17],ymm18[18],ymm17[18],ymm18[19],ymm17[19],ymm18[20],ymm17[20],ymm18[21],ymm17[21],ymm18[22],ymm17[22],ymm18[23],ymm17[23]
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm18 = xmm22[8],xmm21[8],xmm22[9],xmm21[9],xmm22[10],xmm21[10],xmm22[11],xmm21[11],xmm22[12],xmm21[12],xmm22[13],xmm21[13],xmm22[14],xmm21[14],xmm22[15],xmm21[15]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm18, %zmm17
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm11 = ymm13[0],ymm11[0],ymm13[1],ymm11[1],ymm13[2],ymm11[2],ymm13[3],ymm11[3],ymm13[4],ymm11[4],ymm13[5],ymm11[5],ymm13[6],ymm11[6],ymm13[7],ymm11[7],ymm13[16],ymm11[16],ymm13[17],ymm11[17],ymm13[18],ymm11[18],ymm13[19],ymm11[19],ymm13[20],ymm11[20],ymm13[21],ymm11[21],ymm13[22],ymm11[22],ymm13[23],ymm11[23]
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm13 = xmm30[8],xmm31[8],xmm30[9],xmm31[9],xmm30[10],xmm31[10],xmm30[11],xmm31[11],xmm30[12],xmm31[12],xmm30[13],xmm31[13],xmm30[14],xmm31[14],xmm30[15],xmm31[15]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm13, %zmm11
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [4,3,6,5,4,3,6,5,4,3,6,5,7,7,7,7,24,27,26,25,24,27,26,25,24,27,26,25,28,29,30,29]
+; AVX512BW-FAST-NEXT:    vpermw %zmm11, %zmm13, %zmm11
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [5,4,3,6,5,4,3,6,5,4,3,6,7,7,7,7,25,24,27,26,25,24,27,26,25,24,27,26,29,28,31,30]
+; AVX512BW-FAST-NEXT:    vpermw %zmm17, %zmm18, %zmm11 {%k1}
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm6[0],ymm5[0],ymm6[1],ymm5[1],ymm6[2],ymm5[2],ymm6[3],ymm5[3],ymm6[4],ymm5[4],ymm6[5],ymm5[5],ymm6[6],ymm5[6],ymm6[7],ymm5[7],ymm6[16],ymm5[16],ymm6[17],ymm5[17],ymm6[18],ymm5[18],ymm6[19],ymm5[19],ymm6[20],ymm5[20],ymm6[21],ymm5[21],ymm6[22],ymm5[22],ymm6[23],ymm5[23]
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm14[8],xmm12[8],xmm14[9],xmm12[9],xmm14[10],xmm12[10],xmm14[11],xmm12[11],xmm14[12],xmm12[12],xmm14[13],xmm12[13],xmm14[14],xmm12[14],xmm14[15],xmm12[15]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm5
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[16],ymm1[16],ymm2[17],ymm1[17],ymm2[18],ymm1[18],ymm2[19],ymm1[19],ymm2[20],ymm1[20],ymm2[21],ymm1[21],ymm2[22],ymm1[22],ymm2[23],ymm1[23]
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm19[8],xmm15[8],xmm19[9],xmm15[9],xmm19[10],xmm15[10],xmm19[11],xmm15[11],xmm19[12],xmm15[12],xmm19[13],xmm15[13],xmm19[14],xmm15[14],xmm19[15],xmm15[15]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512BW-FAST-NEXT:    vpermw %zmm1, %zmm13, %zmm1
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm25[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-FAST-NEXT:    vpermw %zmm5, %zmm18, %zmm1 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [2,5,4,3,2,5,4,3,2,5,4,3,6,5,6,7]
+; AVX512BW-FAST-NEXT:    vpermw %ymm2, %ymm5, %ymm2
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u,2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u>
+; AVX512BW-FAST-NEXT:    vpshufb %ymm6, %ymm8, %ymm8
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm2, %zmm2
 ; AVX512BW-FAST-NEXT:    movl $1227133513, %eax # imm = 0x49249249
-; AVX512BW-FAST-NEXT:    kmovd %eax, %k2
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm9, %zmm8 {%k2}
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm22[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-FAST-NEXT:    vpermw %ymm9, %ymm0, %ymm9
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u,2,u,1,u,0,u,3,u,u,u,u,u,4,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %ymm10, %ymm11, %ymm11
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,3]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm9, %zmm9
+; AVX512BW-FAST-NEXT:    kmovd %eax, %k1
+; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm2, %zmm11 {%k1}
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm26[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-FAST-NEXT:    vpermw %ymm2, %ymm5, %ymm2
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,2,u,1,u,0,u,3,u,u,u,u,u,4,u,u,u,2,u,1,u,0,u,3,u,u,u,u,u,4,u,u>
+; AVX512BW-FAST-NEXT:    vpshufb %ymm8, %ymm9, %ymm9
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm2, %zmm2
 ; AVX512BW-FAST-NEXT:    movabsq $2342443691899625602, %rax # imm = 0x2082082082082082
-; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm9, %zmm8 {%k3}
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm6, %zmm3 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vpshufb %ymm12, %ymm6, %ymm6
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm14[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-FAST-NEXT:    vpermw %ymm9, %ymm0, %ymm9
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm9, %zmm6
-; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm6, %zmm3 {%k2}
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm19[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512BW-FAST-NEXT:    vpermw %ymm6, %ymm0, %ymm0
-; AVX512BW-FAST-NEXT:    vpshufb %ymm10, %ymm4, %ymm4
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm3 {%k3}
+; AVX512BW-FAST-NEXT:    kmovq %rax, %k2
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm2, %zmm11 {%k2}
+; AVX512BW-FAST-NEXT:    vpshufb %ymm6, %ymm3, %ymm2
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm16[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-FAST-NEXT:    vpermw %ymm3, %ymm5, %ymm3
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512BW-FAST-NEXT:    vmovdqu16 %zmm2, %zmm1 {%k1}
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm20[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX512BW-FAST-NEXT:    vpermw %ymm2, %ymm5, %ymm2
+; AVX512BW-FAST-NEXT:    vpshufb %ymm8, %ymm4, %ymm3
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm2, %zmm1 {%k2}
 ; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm3, 256(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm8, 64(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm2, 192(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm1, 256(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm11, 64(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm23, (%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm10, 192(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm7, 128(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm1, 320(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, 320(%rax)
 ; AVX512BW-FAST-NEXT:    vzeroupper
 ; AVX512BW-FAST-NEXT:    retq
   %in.vec0 = load <64 x i8>, ptr %in.vecptr0, align 64

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
index e5b03241c8e3a..844dc41240166 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
@@ -205,10 +205,10 @@ define void @store_i8_stride7_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX2-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX2-ONLY-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],mem[0],xmm2[1],mem[1]
 ; AVX2-ONLY-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm2
-; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-ONLY-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,4,8,12],zero,zero,zero,ymm0[1,5,9,13],zero,zero,zero,ymm0[2,6],zero,zero,ymm0[18,22,26],zero,zero,zero,zero,ymm0[19,23,27],zero,zero,zero,zero
+; AVX2-ONLY-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,ymm0[0,4,8],zero,zero,zero,zero,ymm0[1,5,9],zero,zero,ymm0[26,30],zero,zero,zero,ymm0[19,23,27,31],zero,zero,zero,zero,zero,zero,zero
-; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,4,8,12],zero,zero,zero,ymm2[1,5,9,13],zero,zero,zero,ymm2[2,6],zero,zero,ymm2[18,22,26],zero,zero,zero,zero,ymm2[19,23,27],zero,zero,zero,zero
 ; AVX2-ONLY-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX2-ONLY-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-ONLY-NEXT:    vpextrd $2, %xmm1, 24(%rax)
@@ -229,10 +229,10 @@ define void @store_i8_stride7_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512F-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],mem[0],xmm2[1],mem[1]
 ; AVX512F-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm2
-; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,4,8,12],zero,zero,zero,ymm0[1,5,9,13],zero,zero,zero,ymm0[2,6],zero,zero,ymm0[18,22,26],zero,zero,zero,zero,ymm0[19,23,27,u,u,u,u]
+; AVX512F-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,ymm0[0,4,8],zero,zero,zero,zero,ymm0[1,5,9],zero,zero,ymm0[26,30],zero,zero,zero,ymm0[19,23,27,31],zero,zero,zero,zero,zero,zero,zero
-; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,4,8,12],zero,zero,zero,ymm2[1,5,9,13],zero,zero,zero,ymm2[2,6],zero,zero,ymm2[18,22,26],zero,zero,zero,zero,ymm2[19,23,27,u,u,u,u]
 ; AVX512F-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512F-NEXT:    vpextrd $2, %xmm1, 24(%rax)
@@ -253,11 +253,11 @@ define void @store_i8_stride7_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX512BW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],mem[0],xmm2[1],mem[1]
 ; AVX512BW-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm2
-; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[0,4,8,12],zero,zero,zero,ymm2[1,5,9,13],zero,zero,zero,ymm2[2,6],zero,zero,ymm2[18,22,26],zero,zero,zero,zero,ymm2[19,23,27],zero,zero,zero,zero
-; AVX512BW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[0,4,8,12],zero,zero,zero,ymm0[1,5,9,13],zero,zero,zero,ymm0[2,6],zero,zero,ymm0[18,22,26],zero,zero,zero,zero,ymm0[19,23,27],zero,zero,zero,zero
+; AVX512BW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
 ; AVX512BW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,ymm0[0,4,8],zero,zero,zero,zero,ymm0[1,5,9],zero,zero,ymm0[26,30],zero,zero,zero,ymm0[19,23,27,31],zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512BW-NEXT:    vpextrd $2, %xmm1, 24(%rax)
 ; AVX512BW-NEXT:    vmovq %xmm1, 16(%rax)
@@ -512,24 +512,24 @@ define void @store_i8_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX2-SLOW-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX2-SLOW-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
 ; AVX2-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm4
-; AVX2-SLOW-NEXT:    vmovq {{.*#+}} xmm5 = mem[0],zero
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-SLOW-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm5[0],ymm0[2],ymm5[2]
+; AVX2-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[0,2,1,3]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,ymm2[5,13],zero,zero,zero,zero,zero,ymm2[6,14],zero,zero,zero,ymm2[23,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,ymm4[5,13],zero,zero,zero,zero,zero,ymm4[6,14],zero,zero,zero,zero,zero,zero,zero,ymm4[23,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vpor %ymm3, %ymm5, %ymm3
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,ymm2[5,13],zero,zero,zero,zero,zero,ymm2[6,14],zero,zero,zero,zero,zero,zero,zero,ymm2[23,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,ymm4[5,13],zero,zero,zero,zero,zero,ymm4[6,14],zero,zero,zero,ymm4[23,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpor %ymm5, %ymm3, %ymm3
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,0,2]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,ymm0[4],zero,zero,zero,zero,zero,zero,ymm0[5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[23,31],zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm1[4,12],zero,zero,zero,zero,zero,ymm1[5,13],zero,zero,zero,zero,zero,ymm1[6,14,22],zero,zero,zero,zero,zero,zero,ymm1[23],zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX2-SLOW-NEXT:    vpor %ymm5, %ymm6, %ymm5
 ; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,u,u,u,u,u,u,u,u>
 ; AVX2-SLOW-NEXT:    vpblendvb %ymm6, %ymm3, %ymm5, %ymm3
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,ymm2[0,8],zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[19,27],zero,zero,zero,zero,zero,ymm2[20,28],zero,zero
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[0,8],zero,zero,zero,zero,zero,ymm4[1,9],zero,zero,zero,zero,zero,ymm4[2,10,18,26],zero,zero,zero,zero,zero,ymm4[19,27],zero,zero,zero,zero,zero,ymm4[20,28]
-; AVX2-SLOW-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[0,8],zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,zero,ymm2[2,10,18,26],zero,zero,zero,zero,zero,ymm2[19,27],zero,zero,zero,zero,zero,ymm2[20,28]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,ymm4[0,8],zero,zero,zero,zero,zero,ymm4[1,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[19,27],zero,zero,zero,zero,zero,ymm4[20,28],zero,zero
+; AVX2-SLOW-NEXT:    vpor %ymm4, %ymm2, %ymm2
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,ymm0[0],zero,zero,zero,zero,zero,zero,ymm0[1],zero,zero,zero,zero,ymm0[18,26],zero,zero,zero,zero,zero,ymm0[19,27],zero,zero,zero,zero,zero
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,ymm1[0,8],zero,zero,zero,zero,zero,ymm1[1,9],zero,zero,zero,zero,zero,zero,zero,ymm1[18],zero,zero,zero,zero,zero,zero,ymm1[19],zero,zero,zero,zero
 ; AVX2-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
@@ -554,22 +554,22 @@ define void @store_i8_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX2-FAST-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX2-FAST-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
 ; AVX2-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm4
-; AVX2-FAST-NEXT:    vmovq {{.*#+}} xmm5 = mem[0],zero
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX2-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm5[0],ymm0[2],ymm5[2]
+; AVX2-FAST-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
 ; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
 ; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [1,3,5,7,1,3,5,7]
 ; AVX2-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX2-FAST-NEXT:    vpermd %ymm4, %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vpermd %ymm2, %ymm1, %ymm1
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,ymm1[1,5,9,13],zero,zero,zero,ymm1[2,6,10,14],zero,zero,zero,ymm1[19,23,27,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <1,3,5,u,5,1,3,u>
-; AVX2-FAST-NEXT:    vpermd %ymm0, %ymm5, %ymm5
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[0,4,8],zero,zero,zero,zero,ymm5[1,5,9],zero,zero,zero,zero,ymm5[2,6,18],zero,zero,zero,zero,ymm5[23,27,19],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-FAST-NEXT:    vpor %ymm5, %ymm1, %ymm1
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <1,3,5,u,5,1,3,u>
+; AVX2-FAST-NEXT:    vpermd %ymm0, %ymm3, %ymm3
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,4,8],zero,zero,zero,zero,ymm3[1,5,9],zero,zero,zero,zero,ymm3[2,6,18],zero,zero,zero,zero,ymm3[23,27,19],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-FAST-NEXT:    vpor %ymm3, %ymm1, %ymm1
+; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm2[0,8],zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,zero,ymm2[2,10,18,26],zero,zero,zero,zero,zero,ymm2[19,27],zero,zero,zero,zero,zero,ymm2[20,28]
+; AVX2-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,ymm2[0,8],zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[19,27],zero,zero,zero,zero,zero,ymm2[20,28],zero,zero
-; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm4[0,8],zero,zero,zero,zero,zero,ymm4[1,9],zero,zero,zero,zero,zero,ymm4[2,10,18,26],zero,zero,zero,zero,zero,ymm4[19,27],zero,zero,zero,zero,zero,ymm4[20,28]
 ; AVX2-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
 ; AVX2-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,2,4,6,0,2,4,6]
 ; AVX2-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
@@ -596,24 +596,24 @@ define void @store_i8_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX2-FAST-PERLANE-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX2-FAST-PERLANE-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
 ; AVX2-FAST-PERLANE-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vmovq {{.*#+}} xmm5 = mem[0],zero
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm5[0],ymm0[2],ymm5[2]
+; AVX2-FAST-PERLANE-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[0,2,1,3]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,ymm2[5,13],zero,zero,zero,zero,zero,ymm2[6,14],zero,zero,zero,ymm2[23,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,ymm4[5,13],zero,zero,zero,zero,zero,ymm4[6,14],zero,zero,zero,zero,zero,zero,zero,ymm4[23,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vpor %ymm3, %ymm5, %ymm3
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,ymm2[5,13],zero,zero,zero,zero,zero,ymm2[6,14],zero,zero,zero,zero,zero,zero,zero,ymm2[23,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,ymm4[5,13],zero,zero,zero,zero,zero,ymm4[6,14],zero,zero,zero,ymm4[23,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpor %ymm5, %ymm3, %ymm3
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,0,2]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,ymm0[4],zero,zero,zero,zero,zero,zero,ymm0[5],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[23,31],zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm6 = ymm1[4,12],zero,zero,zero,zero,zero,ymm1[5,13],zero,zero,zero,zero,zero,ymm1[6,14,22],zero,zero,zero,zero,zero,zero,ymm1[23],zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX2-FAST-PERLANE-NEXT:    vpor %ymm5, %ymm6, %ymm5
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} ymm6 = <0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,u,u,u,u,u,u,u,u>
 ; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm6, %ymm3, %ymm5, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,ymm2[0,8],zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[19,27],zero,zero,zero,zero,zero,ymm2[20,28],zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[0,8],zero,zero,zero,zero,zero,ymm4[1,9],zero,zero,zero,zero,zero,ymm4[2,10,18,26],zero,zero,zero,zero,zero,ymm4[19,27],zero,zero,zero,zero,zero,ymm4[20,28]
-; AVX2-FAST-PERLANE-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[0,8],zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,zero,ymm2[2,10,18,26],zero,zero,zero,zero,zero,ymm2[19,27],zero,zero,zero,zero,zero,ymm2[20,28]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,ymm4[0,8],zero,zero,zero,zero,zero,ymm4[1,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[19,27],zero,zero,zero,zero,zero,ymm4[20,28],zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpor %ymm4, %ymm2, %ymm2
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,ymm0[0],zero,zero,zero,zero,zero,zero,ymm0[1],zero,zero,zero,zero,ymm0[18,26],zero,zero,zero,zero,zero,ymm0[19,27],zero,zero,zero,zero,zero
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,ymm1[0,8],zero,zero,zero,zero,zero,ymm1[1,9],zero,zero,zero,zero,zero,zero,zero,ymm1[18],zero,zero,zero,zero,zero,zero,ymm1[19],zero,zero,zero,zero
 ; AVX2-FAST-PERLANE-NEXT:    vpor %ymm0, %ymm1, %ymm0
@@ -638,18 +638,18 @@ define void @store_i8_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512F-SLOW-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX512F-SLOW-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
 ; AVX512F-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm4
-; AVX512F-SLOW-NEXT:    vmovq {{.*#+}} xmm5 = mem[0],zero
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX512F-SLOW-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512F-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm5[0],ymm0[2],ymm5[2]
+; AVX512F-SLOW-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[2],ymm3[2]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[0,2,1,3]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,ymm2[0,8,u,u,u],zero,zero,ymm2[1,9,u,u,u],zero,zero,zero,zero,ymm2[u,u,u,19,27],zero,zero,ymm2[u,u,u,20,28],zero,zero
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u],zero,zero,ymm2[5,13,u,u,u],zero,zero,ymm2[6,14,u,u,u,23,31],zero,zero,ymm2[u,u,u],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm4[0,8],zero,zero,ymm4[u,u,u,1,9],zero,zero,ymm4[u,u,u,2,10,18,26,u,u,u],zero,zero,ymm4[19,27,u,u,u],zero,zero,ymm4[20,28]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,5,13],zero,zero,ymm4[u,u,u,6,14],zero,zero,ymm4[u,u,u],zero,zero,ymm4[23,31,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm2[0,8],zero,zero,ymm2[u,u,u,1,9],zero,zero,ymm2[u,u,u,2,10,18,26,u,u,u],zero,zero,ymm2[19,27,u,u,u],zero,zero,ymm2[20,28]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm2[u,u,u,5,13],zero,zero,ymm2[u,u,u,6,14],zero,zero,ymm2[u,u,u],zero,zero,ymm2[23,31,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,ymm2[0,8,u,u,u],zero,zero,ymm2[1,9,u,u,u],zero,zero,zero,zero,ymm2[u,u,u,19,27],zero,zero,ymm2[u,u,u,20,28],zero,zero
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u],zero,zero,ymm2[5,13,u,u,u],zero,zero,ymm2[6,14,u,u,u,23,31],zero,zero,ymm2[u,u,u],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm2
 ; AVX512F-SLOW-NEXT:    vporq %zmm2, %zmm3, %zmm2
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,3,0,2]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm0[u,u,u,u],zero,zero,ymm0[0,u,u,u,u],zero,zero,ymm0[1,u,u,u,u,18,26],zero,ymm0[u,u,u,u,19,27],zero,ymm0[u,u,u,u]
@@ -679,28 +679,28 @@ define void @store_i8_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512F-FAST-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX512F-FAST-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
 ; AVX512F-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm4
-; AVX512F-FAST-NEXT:    vmovq {{.*#+}} xmm5 = mem[0],zero
+; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX512F-FAST-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,2,4,u>
-; AVX512F-FAST-NEXT:    vpermi2q %ymm5, %ymm0, %ymm1
+; AVX512F-FAST-NEXT:    vpermi2q %ymm3, %ymm0, %ymm1
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [0,2,4,6,0,2,4,6]
 ; AVX512F-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,ymm0[0,4,8],zero,zero,zero,zero,ymm0[1,5,9],zero,zero,zero,zero,ymm0[18,22,26],zero,zero,zero,zero,ymm0[19,23,27],zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <1,3,5,u,5,1,3,u>
-; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm5, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <1,3,5,u,5,1,3,u>
+; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm3, %ymm1
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,4,8],zero,zero,zero,zero,ymm1[1,5,9],zero,zero,zero,zero,ymm1[2,6,18],zero,zero,zero,zero,ymm1[23,27,19],zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm4[0,8],zero,zero,ymm4[u,u,u,1,9],zero,zero,ymm4[u,u,u,2,10,18,26,u,u,u],zero,zero,ymm4[19,27,u,u,u],zero,zero,ymm4[20,28]
-; AVX512F-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,ymm2[0,8,u,u,u],zero,zero,ymm2[1,9,u,u,u],zero,zero,zero,zero,ymm2[u,u,u,19,27],zero,zero,ymm2[u,u,u,20,28],zero,zero
-; AVX512F-FAST-NEXT:    vpternlogq $168, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,8],zero,zero,ymm2[u,u,u,1,9],zero,zero,ymm2[u,u,u,2,10,18,26,u,u,u],zero,zero,ymm2[19,27,u,u,u],zero,zero,ymm2[20,28]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm2[2,3,0,1]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,ymm3[0,8,u,u,u],zero,zero,ymm3[1,9,u,u,u],zero,zero,zero,zero,ymm3[u,u,u,19,27],zero,zero,ymm3[u,u,u,20,28],zero,zero
+; AVX512F-FAST-NEXT:    vpternlogq $168, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [1,3,5,7,1,3,5,7]
 ; AVX512F-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermd %ymm4, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vpermd %ymm2, %ymm1, %ymm1
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,ymm1[1,5,9,13],zero,zero,zero,ymm1[2,6,10,14],zero,zero,zero,ymm1[19,23,27,31],zero,zero,zero,ymm1[u,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
 ; AVX512F-FAST-NEXT:    vporq %zmm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vextracti32x4 $2, %zmm0, 32(%rax)
 ; AVX512F-FAST-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
@@ -758,23 +758,23 @@ define void @store_i8_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512BW-FAST-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX512BW-FAST-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
 ; AVX512BW-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm4
-; AVX512BW-FAST-NEXT:    vmovq {{.*#+}} xmm5 = mem[0],zero
+; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX512BW-FAST-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <0,2,4,u>
-; AVX512BW-FAST-NEXT:    vpermi2q %ymm5, %ymm0, %ymm1
+; AVX512BW-FAST-NEXT:    vpermi2q %ymm3, %ymm0, %ymm1
 ; AVX512BW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [1,3,5,7,1,3,5,7]
 ; AVX512BW-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512BW-FAST-NEXT:    vpermd %ymm4, %ymm0, %ymm0
+; AVX512BW-FAST-NEXT:    vpermd %ymm2, %ymm0, %ymm0
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm0[1,5,9,13],zero,zero,zero,ymm0[2,6,10,14],zero,zero,zero,ymm0[19,23,27,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <1,3,5,u,5,1,3,u>
-; AVX512BW-FAST-NEXT:    vpermd %ymm1, %ymm5, %ymm5
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[0,4,8],zero,zero,zero,zero,ymm5[1,5,9],zero,zero,zero,zero,ymm5[2,6,18],zero,zero,zero,zero,ymm5[23,27,19],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpor %ymm0, %ymm5, %ymm0
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[0,8],zero,zero,zero,zero,zero,ymm4[1,9],zero,zero,zero,zero,zero,ymm4[2,10,18,26],zero,zero,zero,zero,zero,ymm4[19,27],zero,zero,zero,zero,zero,ymm4[20,28]
-; AVX512BW-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <1,3,5,u,5,1,3,u>
+; AVX512BW-FAST-NEXT:    vpermd %ymm1, %ymm3, %ymm3
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[0,4,8],zero,zero,zero,zero,ymm3[1,5,9],zero,zero,zero,zero,ymm3[2,6,18],zero,zero,zero,zero,ymm3[23,27,19],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-FAST-NEXT:    vpor %ymm0, %ymm3, %ymm0
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm2[0,8],zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,zero,ymm2[2,10,18,26],zero,zero,zero,zero,zero,ymm2[19,27],zero,zero,zero,zero,zero,ymm2[20,28]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,ymm2[0,8],zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[19,27],zero,zero,zero,zero,zero,ymm2[20,28],zero,zero
-; AVX512BW-FAST-NEXT:    vpor %ymm4, %ymm2, %ymm2
+; AVX512BW-FAST-NEXT:    vpor %ymm3, %ymm2, %ymm2
 ; AVX512BW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,2,4,6,0,2,4,6]
 ; AVX512BW-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
 ; AVX512BW-FAST-NEXT:    vpermd %ymm1, %ymm3, %ymm1
@@ -1194,8 +1194,8 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX1-ONLY-NEXT:    vpor %xmm12, %xmm13, %xmm12
 ; AVX1-ONLY-NEXT:    vmovdqa {{.*#+}} xmm13 = <u,255,255,0,0,u,u,u,255,255,0,0,u,u,u,255>
 ; AVX1-ONLY-NEXT:    vpblendvb %xmm13, %xmm11, %xmm12, %xmm11
-; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[u,u,u],zero,zero,xmm10[10,11,u,u,u],zero,zero,xmm10[12,13,u,u]
-; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u,u,10,11],zero,zero,xmm9[u,u,u,12,13],zero,zero,xmm9[u,u]
+; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,zero,zero,zero,xmm10[10,11],zero,zero,zero,zero,zero,xmm10[12,13],zero,zero
+; AVX1-ONLY-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm9[10,11],zero,zero,zero,zero,zero,xmm9[12,13],zero,zero,zero,zero
 ; AVX1-ONLY-NEXT:    vpor %xmm10, %xmm9, %xmm9
 ; AVX1-ONLY-NEXT:    vinsertf128 $1, %xmm11, %ymm9, %ymm9
 ; AVX1-ONLY-NEXT:    vmovaps {{.*#+}} ymm10 = [0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255,255,255,255,0,0,0,255]
@@ -3316,143 +3316,148 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm12
+; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdx), %ymm18
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm3
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm5
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %ymm6
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r10), %ymm4
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[20],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm8[2,3,2,3],zmm7[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm5[23],zero,ymm5[23,24,25,26],zero,ymm5[24],zero,ymm5[30,31]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm5[18],zero,ymm5[20,21,20,21],zero,ymm5[19],zero,ymm5[19,20,21,22],zero
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm9[2,3,2,3],zmm8[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vporq %zmm7, %zmm8, %zmm7
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm4[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,3,2]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm8
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[18],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22],zero,ymm3[20]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm9[2,3,2,3],zmm7[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512F-SLOW-NEXT:    vpandq %ymm16, %ymm9, %ymm9
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm12[18,19,20,21],zero,ymm12[19],zero,ymm12[25,26,27,22],zero,ymm12[20],zero
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm10, %zmm9
-; AVX512F-SLOW-NEXT:    vporq %zmm7, %zmm9, %zmm9
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[21],zero,ymm1[19],zero,zero,zero,zero,ymm1[22],zero,ymm1[20],zero,zero
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm10[2,3,2,3],zmm7[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm2[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[0,0,1,1,4,4,5,5]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
-; AVX512F-SLOW-NEXT:    vpandq %ymm17, %ymm10, %ymm10
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm12[23],zero,ymm12[21,22,23,26],zero,ymm12[24],zero,ymm12[28,29,26,27]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm20
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm10
-; AVX512F-SLOW-NEXT:    vporq %zmm7, %zmm10, %zmm7
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm7
-; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm7
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm11
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm11[u],zero,xmm11[7],zero,xmm11[5,u,u,u],zero,xmm11[8],zero,xmm11[6,u,u,u],zero
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm12
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm12[u,7],zero,xmm12[5],zero,xmm12[u,u,u,8],zero,xmm12[6],zero,xmm12[u,u,u,9]
-; AVX512F-SLOW-NEXT:    vpor %xmm8, %xmm9, %xmm8
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,0,1],zmm8[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm14
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm14[u,u,u],zero,xmm14[7],zero,xmm14[5,u,u,u],zero,xmm14[8],zero,xmm14[6,u,u]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm8[u,u,u,7],zero,xmm8[5],zero,xmm8[u,u,u,8],zero,xmm8[6],zero,xmm8[u,u]
-; AVX512F-SLOW-NEXT:    vpor %xmm10, %xmm13, %xmm10
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3],xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm18 = zmm13[0,1,0,1],zmm10[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm18
-; AVX512F-SLOW-NEXT:    vmovdqa (%r10), %xmm13
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm13[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm13[1,1,0,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm19 = <u,1,u,1,u,0,0,u,16,u,16,u,18,19,u,17>
-; AVX512F-SLOW-NEXT:    vpermi2d %zmm9, %zmm10, %zmm19
-; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm15
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = zero,xmm15[4,u,u,u],zero,xmm15[7],zero,xmm15[5,u,u,u],zero,xmm15[8],zero,xmm15[6]
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm9
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm9[4],zero,xmm9[u,u,u,7],zero,xmm9[5],zero,xmm9[u,u,u,8],zero,xmm9[6],zero
-; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm10, %xmm0
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3],xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm10[0,1,0,1],zmm0[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm19, %zmm10
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm10
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm8
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm8[u],zero,xmm8[7],zero,xmm8[5,u,u,u],zero,xmm8[8],zero,xmm8[6,u,u,u],zero
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm9
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm9[u,7],zero,xmm9[5],zero,xmm9[u,u,u,8],zero,xmm9[6],zero,xmm9[u,u,u,9]
+; AVX512F-SLOW-NEXT:    vpor %xmm7, %xmm10, %xmm7
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm7, %zmm10, %zmm7
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm13
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm13[u,u,u],zero,xmm13[7],zero,xmm13[5,u,u,u],zero,xmm13[8],zero,xmm13[6,u,u]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm14
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm14[u,u,u,7],zero,xmm14[5],zero,xmm14[u,u,u,8],zero,xmm14[6],zero,xmm14[u,u]
+; AVX512F-SLOW-NEXT:    vpor %xmm10, %xmm11, %xmm10
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm10, %zmm11, %zmm10
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm15 = zmm10[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm15
+; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm10
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = zero,xmm10[4,u,u,u],zero,xmm10[7],zero,xmm10[5,u,u,u],zero,xmm10[8],zero,xmm10[6]
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm11
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm11[4],zero,xmm11[u,u,u,7],zero,xmm11[5],zero,xmm11[u,u,u,8],zero,xmm11[6],zero
+; AVX512F-SLOW-NEXT:    vpor %xmm7, %xmm12, %xmm7
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm7, %zmm12, %zmm7
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm16 = zmm7[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vmovdqa (%r10), %xmm12
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm12[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm12[1,1,0,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm7 = zmm0[0,0,1,0,4,4,5,4]
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm7
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm15, %zmm7
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14,u,u],zero,zero,zero,zero,ymm1[15,u,u],zero,zero,zero,zero,ymm1[16,u,u],zero,zero,zero,zero,ymm1[17,u,u],zero,zero,zero,zero,ymm1[18]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm19
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[u,u,0,1,14,15],zero,ymm2[u,u,13,2,3,16],zero,ymm2[u,u,28,29,16,17],zero,ymm2[u,u,19,28,29,18],zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm18
-; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm14[8],xmm8[8],xmm14[9],xmm8[9],xmm14[10],xmm8[10],xmm14[11],xmm8[11],xmm14[12],xmm8[12],xmm14[13],xmm8[13],xmm14[14],xmm8[14],xmm14[15],xmm8[15]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u],zero,ymm3[14,u,u,u,u,u],zero,ymm3[15,u,u,u,u,u],zero,ymm3[16,u,u,u,u,u],zero,ymm3[17,u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,u,14],zero,ymm2[u,u,u,u,u,15],zero,ymm2[u,u,u,u,u,16],zero,ymm2[u,u,u,u,u,17],zero,ymm2[u,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpor %ymm1, %ymm8, %ymm1
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = ymm2[0,1,14],zero,ymm2[u,u,0,1,14,15],zero,ymm2[u,u,13,2,3,16],zero,ymm2[u,u,28,29,16,17],zero,ymm2[u,u,19,28,29,18],zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm16
+; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm15, %ymm0
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm13 = xmm13[8],xmm14[8],xmm13[9],xmm14[9],xmm13[10],xmm14[10],xmm13[11],xmm14[11],xmm13[12],xmm14[12],xmm13[13],xmm14[13],xmm13[14],xmm14[14],xmm13[15],xmm14[15]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm13, %zmm0
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = ymm3[u,u,u,u],zero,ymm3[14,u,u,u,u,u],zero,ymm3[15,u,u,u,u,u],zero,ymm3[16,u,u,u,u,u],zero,ymm3[17,u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm2
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm2[u,u,u,u,14],zero,ymm2[u,u,u,u,u,15],zero,ymm2[u,u,u,u,u,16],zero,ymm2[u,u,u,u,u,17],zero,ymm2[u,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpor %ymm13, %ymm14, %ymm13
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm8[8],xmm9[8],xmm8[9],xmm9[9],xmm8[10],xmm9[10],xmm8[11],xmm9[11],xmm8[12],xmm9[12],xmm8[13],xmm9[13],xmm8[14],xmm9[14],xmm8[15],xmm9[15]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm8, %zmm9
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,ymm5[u,u,u,u,u,14],zero,ymm5[u,u,u,u,u,15],zero,ymm5[u,u,u,u,u,16],zero,ymm5[u,u,u,u,u,17],zero,ymm5[u,u,u]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm6[13,u,u,u,u,u],zero,ymm6[14,u,u,u,u,u],zero,ymm6[15,u,u,u,u,u],zero,ymm6[16,u,u,u,u,u],zero,ymm6[17,u,u,u]
 ; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm8, %ymm0
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm9[8],xmm15[8],xmm9[9],xmm15[9],xmm9[10],xmm15[10],xmm9[11],xmm15[11],xmm9[12],xmm15[12],xmm9[13],xmm15[13],xmm9[14],xmm15[14],xmm9[15],xmm15[15]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm11[8],xmm10[8],xmm11[9],xmm10[9],xmm11[10],xmm10[10],xmm11[11],xmm10[11],xmm11[12],xmm10[12],xmm11[13],xmm10[13],xmm11[14],xmm10[14],xmm11[15],xmm10[15]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm13[0,1,2,3,4,5,5,6]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm12[0,1,2,3,4,5,5,6]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; AVX512F-SLOW-NEXT:    vpandn %ymm8, %ymm9, %ymm8
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = zero,ymm4[13,u,u,u,u],zero,zero,ymm4[14,u,u,u,u],zero,zero,ymm4[15,u,u,u,u],zero,zero,ymm4[16,u,u,u,u],zero,zero,ymm4[17,u,u]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
+; AVX512F-SLOW-NEXT:    vpandn %ymm8, %ymm10, %ymm8
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = zero,ymm4[13,u,u,u,u],zero,zero,ymm4[14,u,u,u,u],zero,zero,ymm4[15,u,u,u,u],zero,zero,ymm4[16,u,u,u,u],zero,zero,ymm4[17,u,u]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
 ; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm8
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm8
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm8
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
+; AVX512F-SLOW-NEXT:    vpand %ymm0, %ymm9, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm2[18,19,20,21],zero,ymm2[19],zero,ymm2[25,26,27,22],zero,ymm2[20],zero
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm10, %zmm0
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[18],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22],zero,ymm3[20]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm14
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[23],zero,zero,zero,zero,ymm14[26],zero,ymm14[24],zero,zero,zero,zero,ymm14[27],zero,ymm14[25]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm10
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vporq %zmm10, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm14[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[0,0,1,1,4,4,5,5]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
+; AVX512F-SLOW-NEXT:    vpand %ymm11, %ymm10, %ymm10
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm2[23],zero,ymm2[21,22,23,26],zero,ymm2[24],zero,ymm2[28,29,26,27]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm10, %zmm10
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[21],zero,ymm1[19],zero,zero,zero,zero,ymm1[22],zero,ymm1[20],zero,zero
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm12
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm12 = zmm12[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vporq %zmm12, %zmm10, %zmm10
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm10
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[20],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm5[18],zero,ymm5[20,21,20,21],zero,ymm5[19],zero,ymm5[19,20,21,22],zero
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm5[23],zero,ymm5[23,24,25,26],zero,ymm5[24],zero,ymm5[30,31]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm12
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm12 = zmm12[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vporq %zmm0, %zmm12, %zmm0
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm4[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,3,2]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm12
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm12
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm12
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm16, %ymm0, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm9, %ymm0, %ymm5
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm17, %ymm1, %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm1
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm11, %ymm3, %ymm2
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm3
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm14[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,2,3,3,6,6,7,7]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm3
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
 ; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm10, (%rax)
 ; AVX512F-SLOW-NEXT:    vmovdqa %ymm3, 192(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm7, 128(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm7, (%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm12, 128(%rax)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm8, 64(%rax)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
@@ -3467,135 +3472,140 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm6
 ; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm1
 ; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa (%r10), %ymm0
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[21],zero,ymm4[19],zero,zero,zero,zero,ymm4[22],zero,ymm4[20],zero,zero
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm8[2,3,2,3],zmm7[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,ymm2[19],zero,ymm2[21,20,21,22],zero,ymm2[20],zero,ymm2[22,23]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm9[2,3,2,3],zmm8[2,3,2,3]
-; AVX512F-FAST-NEXT:    vporq %zmm7, %zmm8, %zmm7
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm9[2,3,2,3],zmm8[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512F-FAST-NEXT:    vpandq %ymm16, %ymm9, %ymm9
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm5[18,19,20,21],zero,ymm5[19],zero,ymm5[25,26,27,22],zero,ymm5[20],zero
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm10, %zmm9
-; AVX512F-FAST-NEXT:    vporq %zmm8, %zmm9, %zmm9
-; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm9
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[20],zero,ymm3[18],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm8[2,3,2,3],zmm7[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm1[23],zero,ymm1[23,24,25,26],zero,ymm1[24],zero,ymm1[30,31]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm1[18],zero,ymm1[20,21,20,21],zero,ymm1[19],zero,ymm1[19,20,21,22],zero
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm20
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm10[2,3,2,3],zmm8[2,3,2,3]
-; AVX512F-FAST-NEXT:    vporq %zmm7, %zmm8, %zmm7
-; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,5,4,u,5,u,4,u>
-; AVX512F-FAST-NEXT:    vpermd %ymm8, %ymm10, %ymm8
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm21
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm8
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm8
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm10
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm10[u],zero,xmm10[7],zero,xmm10[5,u,u,u],zero,xmm10[8],zero,xmm10[6,u,u,u],zero
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm11
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm11[u,7],zero,xmm11[5],zero,xmm11[u,u,u,8],zero,xmm11[6],zero,xmm11[u,u,u,9]
-; AVX512F-FAST-NEXT:    vpor %xmm7, %xmm9, %xmm7
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,0,1],zmm7[0,1,0,1]
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm15
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm15[u,u,u],zero,xmm15[7],zero,xmm15[5,u,u,u],zero,xmm15[8],zero,xmm15[6,u,u]
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm7
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm7[u,u,u,7],zero,xmm7[5],zero,xmm7[u,u,u,8],zero,xmm7[6],zero,xmm7[u,u]
-; AVX512F-FAST-NEXT:    vpor %xmm12, %xmm13, %xmm12
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm7[0],xmm15[0],xmm7[1],xmm15[1],xmm7[2],xmm15[2],xmm7[3],xmm15[3],xmm7[4],xmm15[4],xmm7[5],xmm15[5],xmm7[6],xmm15[6],xmm7[7],xmm15[7]
+; AVX512F-FAST-NEXT:    vmovdqa64 (%r10), %ymm17
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm8
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm8[u],zero,xmm8[7],zero,xmm8[5,u,u,u],zero,xmm8[8],zero,xmm8[6,u,u,u],zero
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm9
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm9[u,7],zero,xmm9[5],zero,xmm9[u,u,u,8],zero,xmm9[6],zero,xmm9[u,u,u,9]
+; AVX512F-FAST-NEXT:    vpor %xmm7, %xmm10, %xmm7
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm10, %zmm7
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[0,1,0,1,4,5,4,5]
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm11
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm10 = xmm11[u,u,u],zero,xmm11[7],zero,xmm11[5,u,u,u],zero,xmm11[8],zero,xmm11[6,u,u]
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm12
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm12[u,u,u,7],zero,xmm12[5],zero,xmm12[u,u,u,8],zero,xmm12[6],zero,xmm12[u,u]
+; AVX512F-FAST-NEXT:    vpor %xmm10, %xmm13, %xmm10
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm13[0,1,0,1],zmm12[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm17
-; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm12
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = zero,xmm12[4,u,u,u],zero,xmm12[7],zero,xmm12[5,u,u,u],zero,xmm12[8],zero,xmm12[6]
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm13
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm13[4],zero,xmm13[u,u,u,7],zero,xmm13[5],zero,xmm13[u,u,u,8],zero,xmm13[6],zero
-; AVX512F-FAST-NEXT:    vpor %xmm9, %xmm14, %xmm9
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm14[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm18 = zmm14[0,1,0,1],zmm9[0,1,0,1]
-; AVX512F-FAST-NEXT:    vmovdqa (%r10), %xmm14
-; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm14[1,1,0,0,4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = <u,1,u,1,u,0,0,u>
-; AVX512F-FAST-NEXT:    vpermd %ymm9, %ymm19, %ymm9
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm14[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm9, %zmm9
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm9
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm9
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm10, %zmm13, %zmm10
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm15 = zmm10[0,1,0,1,4,5,4,5]
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm15
+; AVX512F-FAST-NEXT:    vmovdqa (%r10), %xmm10
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm10[1,1,0,0,4,5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,1,u,1,u,0,0,u>
+; AVX512F-FAST-NEXT:    vpermd %ymm7, %ymm13, %ymm7
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm10[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,1,0]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm7, %zmm16
+; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm13
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = zero,xmm13[4,u,u,u],zero,xmm13[7],zero,xmm13[5,u,u,u],zero,xmm13[8],zero,xmm13[6]
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm14
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm14[4],zero,xmm14[u,u,u,7],zero,xmm14[5],zero,xmm14[u,u,u,8],zero,xmm14[6],zero
+; AVX512F-FAST-NEXT:    vpor %xmm7, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm7, %zmm0
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm7 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm7
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm15, %zmm7
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm4[14,u,u],zero,zero,zero,zero,ymm4[15,u,u],zero,zero,zero,zero,ymm4[16,u,u],zero,zero,zero,zero,ymm4[17,u,u],zero,zero,zero,zero,ymm4[18]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[u,u,0,1,14,15],zero,ymm2[u,u,13,2,3,16],zero,ymm2[u,u,28,29,16,17],zero,ymm2[u,u,19,28,29,18],zero
-; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm15[8],xmm7[8],xmm15[9],xmm7[9],xmm15[10],xmm7[10],xmm15[11],xmm7[11],xmm15[12],xmm7[12],xmm15[13],xmm7[13],xmm15[14],xmm7[14],xmm15[15],xmm7[15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm6[u,u,u,u],zero,ymm6[14,u,u,u,u,u],zero,ymm6[15,u,u,u,u,u],zero,ymm6[16,u,u,u,u,u],zero,ymm6[17,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm5[u,u,u,u,14],zero,ymm5[u,u,u,u,u,15],zero,ymm5[u,u,u,u,u,16],zero,ymm5[u,u,u,u,u,17],zero,ymm5[u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %ymm1, %ymm7, %ymm1
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm10[8],xmm11[8],xmm10[9],xmm11[9],xmm10[10],xmm11[10],xmm10[11],xmm11[11],xmm10[12],xmm11[12],xmm10[13],xmm11[13],xmm10[14],xmm11[14],xmm10[15],xmm11[15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm7, %zmm1
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm20, %ymm11
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,ymm11[u,u,u,u,u,14],zero,ymm11[u,u,u,u,u,15],zero,ymm11[u,u,u,u,u,16],zero,ymm11[u,u,u,u,u,17],zero,ymm11[u,u,u]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm3[13,u,u,u,u,u],zero,ymm3[14,u,u,u,u,u],zero,ymm3[15,u,u,u,u,u],zero,ymm3[16,u,u,u,u,u],zero,ymm3[17,u,u,u]
-; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm7, %ymm0
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm7, %zmm0
-; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm14[0,1,2,3,4,5,5,6]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm2[0,1,14],zero,ymm2[u,u,0,1,14,15],zero,ymm2[u,u,13,2,3,16],zero,ymm2[u,u,28,29,16,17],zero,ymm2[u,u,19,28,29,18],zero
+; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm15, %ymm0
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm11, %zmm0
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm6[u,u,u,u],zero,ymm6[14,u,u,u,u,u],zero,ymm6[15,u,u,u,u,u],zero,ymm6[16,u,u,u,u,u],zero,ymm6[17,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm5[u,u,u,u,14],zero,ymm5[u,u,u,u,u,15],zero,ymm5[u,u,u,u,u,16],zero,ymm5[u,u,u,u,u,17],zero,ymm5[u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpor %ymm11, %ymm12, %ymm11
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm8[8],xmm9[8],xmm8[9],xmm9[9],xmm8[10],xmm9[10],xmm8[11],xmm9[11],xmm8[12],xmm9[12],xmm8[13],xmm9[13],xmm8[14],xmm9[14],xmm8[15],xmm9[15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm8, %zmm9
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm9
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,ymm1[u,u,u,u,u,14],zero,ymm1[u,u,u,u,u,15],zero,ymm1[u,u,u,u,u,16],zero,ymm1[u,u,u,u,u,17],zero,ymm1[u,u,u]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm3[13,u,u,u,u,u],zero,ymm3[14,u,u,u,u,u],zero,ymm3[15,u,u,u,u,u],zero,ymm3[16,u,u,u,u,u],zero,ymm3[17,u,u,u]
+; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm8, %ymm0
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm10[0,1,2,3,4,5,5,6]
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [2,2,3,3,2,2,3,3]
 ; AVX512F-FAST-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermd %ymm7, %ymm10, %ymm7
+; AVX512F-FAST-NEXT:    vpermd %ymm8, %ymm10, %ymm8
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; AVX512F-FAST-NEXT:    vpandn %ymm7, %ymm10, %ymm7
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm21, %ymm12
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = zero,ymm12[13,u,u,u,u],zero,zero,ymm12[14,u,u,u,u],zero,zero,ymm12[15,u,u,u,u],zero,zero,ymm12[16,u,u,u,u],zero,zero,ymm12[17,u,u]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm7, %zmm7
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm7
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm7
+; AVX512F-FAST-NEXT:    vpandn %ymm8, %ymm10, %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm13
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = zero,ymm13[13,u,u,u,u],zero,zero,ymm13[14,u,u,u,u],zero,zero,ymm13[15,u,u,u,u],zero,zero,ymm13[16,u,u,u,u],zero,zero,ymm13[17,u,u]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm8
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm8
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
+; AVX512F-FAST-NEXT:    vpand %ymm0, %ymm9, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm5[18,19,20,21],zero,ymm5[19],zero,ymm5[25,26,27,22],zero,ymm5[20],zero
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm10, %zmm0
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[18],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm10
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vporq %zmm10, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[21],zero,ymm4[19],zero,zero,zero,zero,ymm4[22],zero,ymm4[20],zero,zero
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero,zero,zero
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm10
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm10 = zmm10[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,ymm2[19],zero,ymm2[21,20,21,22],zero,ymm2[20],zero,ymm2[22,23]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm5[23],zero,ymm5[21,22,23,26],zero,ymm5[24],zero,ymm5[28,29,26,27]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm11
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm11 = zmm11[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vporq %zmm10, %zmm11, %zmm10
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm10
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[20],zero,ymm3[18],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,ymm1[18],zero,ymm1[20,21,20,21],zero,ymm1[19],zero,ymm1[19,20,21,22],zero
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm1[23],zero,ymm1[23,24,25,26],zero,ymm1[24],zero,ymm1[30,31]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm11
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm11 = zmm11[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vporq %zmm0, %zmm11, %zmm0
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm13[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,5,4,u,5,u,4,u>
+; AVX512F-FAST-NEXT:    vpermd %ymm11, %ymm12, %ymm11
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm11
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm11
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm11
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm5
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero,zero
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm2[28],zero,ymm2[30,31,30,31],zero,ymm2[29],zero,ymm2[31,28,29]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm2, %ymm0
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[27],zero,zero,zero,zero,ymm11[30],zero,ymm11[28],zero,zero,zero,zero,ymm11[31],zero,ymm11[29]
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm16, %ymm1, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm1
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa %ymm1, 192(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm9, (%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, 128(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, 64(%rax)
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm9, %ymm2, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm2
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa %ymm2, 192(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, (%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm11, 128(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, 64(%rax)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -3603,81 +3613,81 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW:       # %bb.0:
 ; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %ymm14
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rsi), %ymm8
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdx), %ymm6
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rcx), %ymm10
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm6[0,1,0,1,14],zero,ymm6[14,15,0,1,14,15],zero,ymm6[13,14,15,16,17,16],zero,ymm6[30,31,30,31,16,17],zero,ymm6[31,28,29,30,31]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,ymm10[14],zero,zero,zero,zero,zero,zero,ymm10[15],zero,zero,zero,zero,zero,zero,ymm10[16],zero,zero,zero,zero,zero,zero,ymm10[17],zero,zero,zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rsi), %xmm4
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm1
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm14[0,1,14],zero,ymm14[12,13,0,1,14,15],zero,ymm14[3,12,13,2,3,16],zero,ymm14[30,31,28,29,16,17],zero,ymm14[31,18,19,28,29,18],zero
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,ymm8[14],zero,zero,zero,zero,zero,zero,ymm8[15],zero,zero,zero,zero,zero,zero,ymm8[16],zero,zero,zero,zero,zero,zero,ymm8[17],zero,zero,zero,zero,zero,zero,ymm8[18]
-; AVX512BW-SLOW-NEXT:    vpor %ymm0, %ymm2, %ymm0
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rdx), %xmm11
-; AVX512BW-SLOW-NEXT:    vmovdqa (%rcx), %xmm13
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm13[8],xmm11[8],xmm13[9],xmm11[9],xmm13[10],xmm11[10],xmm13[11],xmm11[11],xmm13[12],xmm11[12],xmm13[13],xmm11[13],xmm13[14],xmm11[14],xmm13[15],xmm11[15]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %ymm4
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rsi), %ymm2
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rcx), %ymm3
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[0,1,0,1,14],zero,ymm1[14,15,0,1,14,15],zero,ymm1[13,14,15,16,17,16],zero,ymm1[30,31,30,31,16,17],zero,ymm1[31,28,29,30,31]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = zero,zero,zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero,zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vpor %ymm0, %ymm5, %ymm0
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm9
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rsi), %xmm10
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm5, %zmm5
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm4[0,1,14],zero,ymm4[12,13,0,1,14,15],zero,ymm4[3,12,13,2,3,16],zero,ymm4[30,31,28,29,16,17],zero,ymm4[31,18,19,28,29,18],zero
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,ymm2[14],zero,zero,zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,zero,zero,ymm2[18]
+; AVX512BW-SLOW-NEXT:    vpor %ymm0, %ymm6, %ymm0
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rdx), %xmm12
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rcx), %xmm14
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm14[8],xmm12[8],xmm14[9],xmm12[9],xmm14[10],xmm12[10],xmm14[11],xmm12[11],xmm14[12],xmm12[12],xmm14[13],xmm12[13],xmm14[14],xmm12[14],xmm14[15],xmm12[15]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm6, %zmm0
 ; AVX512BW-SLOW-NEXT:    movabsq $435749858791416001, %rcx # imm = 0x60C1830183060C1
 ; AVX512BW-SLOW-NEXT:    kmovq %rcx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
-; AVX512BW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm1[13],zero,zero,zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = mem[0,1,2,3,0,1,2,3]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero
-; AVX512BW-SLOW-NEXT:    vpor %ymm5, %ymm7, %ymm5
-; AVX512BW-SLOW-NEXT:    vmovdqa (%r9), %xmm9
-; AVX512BW-SLOW-NEXT:    vmovdqa (%r8), %xmm12
-; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm12[8],xmm9[8],xmm12[9],xmm9[9],xmm12[10],xmm9[10],xmm12[11],xmm9[11],xmm12[12],xmm9[12],xmm12[13],xmm9[13],xmm12[14],xmm9[14],xmm12[15],xmm9[15]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm7
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm5, %zmm0 {%k1}
 ; AVX512BW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm5[13],zero,zero,zero,zero,zero,zero,ymm5[14],zero,zero,zero,zero,zero,zero,ymm5[15],zero,zero,zero,zero,zero,zero,ymm5[16],zero,zero,zero,zero,zero,zero,ymm5[17],zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm6 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,ymm6[14],zero,zero,zero,zero,zero,zero,ymm6[15],zero,zero,zero,zero,zero,zero,ymm6[16],zero,zero,zero,zero,zero,zero,ymm6[17],zero,zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vpor %ymm7, %ymm8, %ymm7
+; AVX512BW-SLOW-NEXT:    vmovdqa (%r9), %xmm11
+; AVX512BW-SLOW-NEXT:    vmovdqa (%r8), %xmm13
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm13[8],xmm11[8],xmm13[9],xmm11[9],xmm13[10],xmm11[10],xmm13[11],xmm11[11],xmm13[12],xmm11[12],xmm13[13],xmm11[13],xmm13[14],xmm11[14],xmm13[15],xmm11[15]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm8
+; AVX512BW-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm7 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
 ; AVX512BW-SLOW-NEXT:    # ymm15 = mem[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm5, %ymm15, %ymm15
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm16 = ymm5[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm7, %ymm15, %ymm15
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm16 = ymm7[12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm15, %zmm15
 ; AVX512BW-SLOW-NEXT:    movabsq $2323999253380730912, %rcx # imm = 0x2040810204081020
 ; AVX512BW-SLOW-NEXT:    kmovq %rcx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm15, %zmm7 {%k1}
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm15, %zmm8 {%k1}
 ; AVX512BW-SLOW-NEXT:    movabsq $4066998693416279096, %rcx # imm = 0x3870E1C3870E1C38
 ; AVX512BW-SLOW-NEXT:    kmovq %rcx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm7, %zmm0 {%k1}
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm7
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} zmm7 = zmm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zmm7[18,19,20,21],zero,zmm7[19],zero,zmm7[25,26,27,22],zero,zmm7[20],zero,zmm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm7[55],zero,zero,zero,zero,zmm7[58],zero,zmm7[56],zero,zero,zero,zero,zmm7[59],zero
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[2,3,2,3,6,7,6,7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm10, %zmm15
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm8, %zmm0 {%k1}
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm8
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} zmm8 = zmm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zmm8[18,19,20,21],zero,zmm8[19],zero,zmm8[25,26,27,22],zero,zmm8[20],zero,zmm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm8[55],zero,zero,zero,zero,zmm8[58],zero,zmm8[56],zero,zero,zero,zero,zmm8[59],zero
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm8 = zmm8[2,3,2,3,6,7,6,7]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm15
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} zmm15 = zmm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm15[18],zero,zero,zero,zero,zmm15[21],zero,zmm15[19],zero,zero,zero,zero,zmm15[22],zero,zmm15[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm15[55],zero,zero,zero,zero,zmm15[58],zero,zmm15[56],zero,zero,zero,zero,zmm15[59],zero,zmm15[57]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm15 = zmm15[2,3,2,3,6,7,6,7]
-; AVX512BW-SLOW-NEXT:    vporq %zmm7, %zmm15, %zmm7
-; AVX512BW-SLOW-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm14[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512BW-SLOW-NEXT:    vporq %zmm8, %zmm15, %zmm8
+; AVX512BW-SLOW-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[0,0,1,1,4,4,5,5]
 ; AVX512BW-SLOW-NEXT:    movl $676341840, %ecx # imm = 0x28502850
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 {%k1} = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 {%k1} = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,21,u,19,u,u,u,u,22,u,20,u,u]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm16 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm6[23],zero,ymm6[21,22,23,26],zero,ymm6[24],zero,ymm6[28,29,26,27]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm16 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm1[23],zero,ymm1[21,22,23,26],zero,ymm1[24],zero,ymm1[28,29,26,27]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm17 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm10[25],zero,ymm10[23],zero,zero,zero,zero,ymm10[26],zero,ymm10[24],zero,zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm17 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[25],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm17[2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    vporq %ymm16, %ymm17, %ymm16
 ; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm15, %zmm15
 ; AVX512BW-SLOW-NEXT:    movabsq $-9005497107459067808, %rcx # imm = 0x83060C180C183060
 ; AVX512BW-SLOW-NEXT:    kmovq %rcx, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm15, %zmm7 {%k2}
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm15, %zmm8 {%k2}
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
-; AVX512BW-SLOW-NEXT:    vpermw %zmm5, %zmm15, %zmm15
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} zmm16 = zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm2[18],zero,zmm2[20,21,20,21],zero,zmm2[19],zero,zmm2[19,20,21,22],zero,zmm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm2[55],zero,zmm2[55,56,57,58],zero,zmm2[56],zero,zmm2[62,63]
+; AVX512BW-SLOW-NEXT:    vpermw %zmm7, %zmm15, %zmm15
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} zmm16 = zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm6[18],zero,zmm6[20,21,20,21],zero,zmm6[19],zero,zmm6[19,20,21,22],zero,zmm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm6[55],zero,zmm6[55,56,57,58],zero,zmm6[56],zero,zmm6[62,63]
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm16 = zmm16[2,3,2,3,6,7,6,7]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} zmm17 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm1[20],zero,zmm1[18],zero,zero,zero,zero,zmm1[21],zero,zmm1[19],zero,zero,zero,zero,zmm1[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm1[57],zero,zmm1[55],zero,zero,zero,zero,zmm1[58],zero,zmm1[56],zero,zero
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} zmm17 = zmm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm5[20],zero,zmm5[18],zero,zero,zero,zero,zmm5[21],zero,zmm5[19],zero,zero,zero,zero,zmm5[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm5[57],zero,zmm5[55],zero,zero,zero,zero,zmm5[58],zero,zmm5[56],zero,zero
 ; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm17 = zmm17[2,3,2,3,6,7,6,7]
 ; AVX512BW-SLOW-NEXT:    vporq %zmm16, %zmm17, %zmm16
 ; AVX512BW-SLOW-NEXT:    movabsq $1161999626690365456, %rcx # imm = 0x1020408102040810
@@ -3685,68 +3695,71 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm15, %zmm16 {%k2}
 ; AVX512BW-SLOW-NEXT:    movabsq $2033499346708139548, %rcx # imm = 0x1C3870E1C3870E1C
 ; AVX512BW-SLOW-NEXT:    kmovq %rcx, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm16, %zmm7 {%k2}
-; AVX512BW-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[2,2,3,3,6,6,7,7]
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm16, %zmm8 {%k2}
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm14[u,u,u],zero,xmm14[7],zero,xmm14[5,u,u,u],zero,xmm14[8],zero,xmm14[6,u,u]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm16 = xmm12[u,u,u,7],zero,xmm12[5],zero,xmm12[u,u,u,8],zero,xmm12[6],zero,xmm12[u,u]
+; AVX512BW-SLOW-NEXT:    vporq %xmm15, %xmm16, %xmm15
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm12[0],xmm14[0],xmm12[1],xmm14[1],xmm12[2],xmm14[2],xmm12[3],xmm14[3],xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm15, %zmm12, %zmm12
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm12 = zmm12[0,1,0,1,4,5,4,5]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm14 = xmm10[u],zero,xmm10[7],zero,xmm10[5,u,u,u],zero,xmm10[8],zero,xmm10[6,u,u,u],zero
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm9[u,7],zero,xmm9[5],zero,xmm9[u,u,u,8],zero,xmm9[6],zero,xmm9[u,u,u,9]
+; AVX512BW-SLOW-NEXT:    vpor %xmm14, %xmm15, %xmm14
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3],xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm14, %zmm9, %zmm9
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm9 = zmm9[0,1,0,1,4,5,4,5]
+; AVX512BW-SLOW-NEXT:    movabsq $871499720017774092, %rcx # imm = 0xC183060C183060C
+; AVX512BW-SLOW-NEXT:    kmovq %rcx, %k2
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm12, %zmm9 {%k2}
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = zero,xmm11[4,u,u,u],zero,xmm11[7],zero,xmm11[5,u,u,u],zero,xmm11[8],zero,xmm11[6]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm13[4],zero,xmm13[u,u,u,7],zero,xmm13[5],zero,xmm13[u,u,u,8],zero,xmm13[6],zero
+; AVX512BW-SLOW-NEXT:    vpor %xmm10, %xmm12, %xmm10
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3],xmm13[4],xmm11[4],xmm13[5],xmm11[5],xmm13[6],xmm11[6],xmm13[7],xmm11[7]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm10, %zmm11, %zmm10
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm10 = zmm10[0,1,0,1,4,5,4,5]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm7, %zmm11
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
+; AVX512BW-SLOW-NEXT:    vpermw %zmm11, %zmm12, %zmm11
+; AVX512BW-SLOW-NEXT:    movabsq $4647998506761461824, %rcx # imm = 0x4081020408102040
+; AVX512BW-SLOW-NEXT:    kmovq %rcx, %k2
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm11, %zmm10 {%k2}
+; AVX512BW-SLOW-NEXT:    movabsq $8133997386832558192, %rcx # imm = 0x70E1C3870E1C3870
+; AVX512BW-SLOW-NEXT:    kmovq %rcx, %k2
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm10, %zmm9 {%k2}
+; AVX512BW-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,2,3,3,6,6,7,7]
 ; AVX512BW-SLOW-NEXT:    movl $338170920, %ecx # imm = 0x14281428
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k2
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 {%k2} = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm14[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
-; AVX512BW-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,2,3,3,4,6,7,7]
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm6, %ymm10 {%k1}
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm10[2,3,2,3]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 {%k2} = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm4[2,3,2,3]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u,29,u]
+; AVX512BW-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,3,3,4,6,7,7]
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm1, %ymm3 {%k1}
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm3[2,3,2,3]
 ; AVX512BW-SLOW-NEXT:    movl $101455920, %ecx # imm = 0x60C1830
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm8, %ymm6 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm13[u,u,u],zero,xmm13[7],zero,xmm13[5,u,u,u],zero,xmm13[8],zero,xmm13[6,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm11[u,u,u,7],zero,xmm11[5],zero,xmm11[u,u,u,8],zero,xmm11[6],zero,xmm11[u,u]
-; AVX512BW-SLOW-NEXT:    vpor %xmm8, %xmm10, %xmm8
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm11[0],xmm13[0],xmm11[1],xmm13[1],xmm11[2],xmm13[2],xmm11[3],xmm13[3],xmm11[4],xmm13[4],xmm11[5],xmm13[5],xmm11[6],xmm13[6],xmm11[7],xmm13[7]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm10[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm10[0,1,0,1],zmm8[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm4[u],zero,xmm4[7],zero,xmm4[5,u,u,u],zero,xmm4[8],zero,xmm4[6,u,u,u],zero
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm3[u,7],zero,xmm3[5],zero,xmm3[u,u,u,8],zero,xmm3[6],zero,xmm3[u,u,u,9]
-; AVX512BW-SLOW-NEXT:    vpor %xmm10, %xmm11, %xmm10
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,0,1],zmm10[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    movabsq $871499720017774092, %rcx # imm = 0xC183060C183060C
-; AVX512BW-SLOW-NEXT:    kmovq %rcx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm8, %zmm3 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,xmm9[4,u,u,u],zero,xmm9[7],zero,xmm9[5,u,u,u],zero,xmm9[8],zero,xmm9[6]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm12[4],zero,xmm12[u,u,u,7],zero,xmm12[5],zero,xmm12[u,u,u,8],zero,xmm12[6],zero
-; AVX512BW-SLOW-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm12[0],xmm9[0],xmm12[1],xmm9[1],xmm12[2],xmm9[2],xmm12[3],xmm9[3],xmm12[4],xmm9[4],xmm12[5],xmm9[5],xmm12[6],xmm9[6],xmm12[7],xmm9[7]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm8[0,1,0,1],zmm4[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm5, %zmm8
-; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
-; AVX512BW-SLOW-NEXT:    vpermw %zmm8, %zmm9, %zmm8
-; AVX512BW-SLOW-NEXT:    movabsq $4647998506761461824, %rcx # imm = 0x4081020408102040
-; AVX512BW-SLOW-NEXT:    kmovq %rcx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm8, %zmm4 {%k1}
-; AVX512BW-SLOW-NEXT:    movabsq $8133997386832558192, %rcx # imm = 0x70E1C3870E1C3870
-; AVX512BW-SLOW-NEXT:    kmovq %rcx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm4, %zmm3 {%k1}
-; AVX512BW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15]
-; AVX512BW-SLOW-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX512BW-SLOW-NEXT:    vpermw %ymm5, %ymm4, %ymm4
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29]
-; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512BW-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm2, %ymm1 {%k1}
+; AVX512BW-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15]
+; AVX512BW-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vpermw %ymm7, %ymm2, %ymm2
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512BW-SLOW-NEXT:    vpor %ymm3, %ymm4, %ymm3
 ; AVX512BW-SLOW-NEXT:    movl $-2130574328, %ecx # imm = 0x81020408
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm4, %ymm1 {%k1}
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm3, (%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm2, %ymm3 {%k1}
 ; AVX512BW-SLOW-NEXT:    movl $-507279602, %ecx # imm = 0xE1C3870E
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm1, %ymm6 {%k1}
-; AVX512BW-SLOW-NEXT:    vmovdqa %ymm6, 192(%rax)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm7, 128(%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqu8 %ymm3, %ymm1 {%k1}
+; AVX512BW-SLOW-NEXT:    vmovdqa %ymm1, 192(%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm8, 128(%rax)
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, 64(%rax)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
 ; AVX512BW-SLOW-NEXT:    retq
@@ -3838,13 +3851,15 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vporq %xmm15, %xmm16, %xmm15
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm12[0],xmm14[0],xmm12[1],xmm14[1],xmm12[2],xmm14[2],xmm12[3],xmm14[3],xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm12[0,1,0,1],zmm15[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm15, %zmm12, %zmm12
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm12 = zmm12[0,1,0,1,4,5,4,5]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm9[u],zero,xmm9[7],zero,xmm9[5,u,u,u],zero,xmm9[8],zero,xmm9[6,u,u,u],zero
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm15 = xmm8[u,7],zero,xmm8[5],zero,xmm8[u,u,u,8],zero,xmm8[6],zero,xmm8[u,u,u,9]
 ; AVX512BW-FAST-NEXT:    vpor %xmm14, %xmm15, %xmm14
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1],xmm8[2],xmm9[2],xmm8[3],xmm9[3],xmm8[4],xmm9[4],xmm8[5],xmm9[5],xmm8[6],xmm9[6],xmm8[7],xmm9[7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm8[0,1,0,1],zmm14[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm14, %zmm8, %zmm8
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm8 = zmm8[0,1,0,1,4,5,4,5]
 ; AVX512BW-FAST-NEXT:    movabsq $871499720017774092, %rcx # imm = 0xC183060C183060C
 ; AVX512BW-FAST-NEXT:    kmovq %rcx, %k1
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm12, %zmm8 {%k1}
@@ -3853,7 +3868,8 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vpor %xmm9, %xmm12, %xmm9
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3],xmm13[4],xmm11[4],xmm13[5],xmm11[5],xmm13[6],xmm11[6],xmm13[7],xmm11[7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm11[0,1,0,1],zmm9[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm9, %zmm11, %zmm9
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm9 = zmm9[0,1,0,1,4,5,4,5]
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm7, %zmm11
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
 ; AVX512BW-FAST-NEXT:    vpermw %zmm11, %zmm12, %zmm11
@@ -3887,11 +3903,11 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    movl $-2130574328, %ecx # imm = 0x81020408
 ; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm2, %ymm3 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm8, (%rax)
 ; AVX512BW-FAST-NEXT:    movl $-507279602, %ecx # imm = 0xE1C3870E
 ; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %ymm3, %ymm1 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa %ymm1, 192(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm8, (%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm10, 128(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, 64(%rax)
 ; AVX512BW-FAST-NEXT:    vzeroupper
@@ -7170,473 +7186,480 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512F-SLOW-LABEL: store_i8_stride7_vf64:
 ; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    subq $1640, %rsp # imm = 0x668
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm7
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm7[14],zero,zero,zero,zero,zero,zero,ymm7[15],zero,zero,zero,zero,zero,zero,ymm7[16],zero,zero,zero,zero,zero,zero,ymm7[17],zero,zero,zero,zero,zero,zero,ymm7[18]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm8
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm8[0,1,14],zero,ymm8[12,13,0,1,14,15],zero,ymm8[3,12,13,2,3,16],zero,ymm8[30,31,28,29,16,17],zero,ymm8[31,18,19,28,29,18],zero
+; AVX512F-SLOW-NEXT:    subq $1368, %rsp # imm = 0x558
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm13
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm13, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm16
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm15
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm15, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm18
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm13
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm5
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm13, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm16
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm12
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm15, %ymm12, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm5, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm25
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm7
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm7, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm28
 ; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,ymm0[17],zero,zero,zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm23
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = <13,u,u,u,u,u,128,14,u,u,u,u,u,128,15,u,u,u,u,u,128,16,u,u,u,u,u,128,17,u,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm31
-; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <13,u,u,u,u,u,128,14,u,u,u,u,u,128,15,u,u,u,u,u,128,16,u,u,u,u,u,128,17,u,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm29
+; AVX512F-SLOW-NEXT:    vporq %ymm0, %ymm1, %ymm24
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %ymm12
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm2
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[27],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29]
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm2, %ymm10
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm12[25],zero,ymm12[23],zero,zero,zero,zero,ymm12[26],zero,ymm12[24],zero,zero
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm11
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm11[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm6
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <9,u,7,u,u,u,u,10,u,8,u,u,u,u,11,u,9,u,7,u,u,u,u,10,u,8,u,u,u,u,11,u>
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [4,5,6,7,4,5,6,7,22,22,23,23,22,22,23,23]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm6, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm25
-; AVX512F-SLOW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm11, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm21
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm11, %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm8
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm9
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero,ymm9[29],zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm8, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm4
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm2, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm22
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm2, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm11
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm11[30],zero,ymm11[28],zero,zero,zero,zero,ymm11[31],zero,ymm11[29],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm6
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm6, %ymm3
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rax), %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm5, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm5[18],zero,zero,zero,zero,ymm5[21],zero,ymm5[19],zero,zero,zero,zero,ymm5[22],zero,ymm5[20]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm11
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[27],zero,zero,zero,zero,ymm11[30],zero,ymm11[28],zero,zero,zero,zero,ymm11[31],zero,ymm11[29]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm11, %ymm14
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm11, (%rsp) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm2, %ymm11
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm7[23],zero,ymm7[21,22,23,26],zero,ymm7[24],zero,ymm7[28,29,26,27]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm7[18,19,20,21],zero,ymm7[19],zero,ymm7[25,26,27,22],zero,ymm7[20],zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm7, %ymm26
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rax), %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm15, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm13[21],zero,ymm13[19],zero,zero,zero,zero,ymm13[22],zero,ymm13[20],zero,zero
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm28
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm2, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm26
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm20
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm14, %xmm30
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm2, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm14, %xmm21
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm27
 ; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm1, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm19
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm9, %xmm2, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm18
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm27
-; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %xmm9
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %xmm10
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm9, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm24
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm17
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm10, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm23
-; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm15, %ymm4, %ymm1
-; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm6[14],zero,zero,zero,zero,zero,zero,ymm6[15],zero,zero,zero,zero,zero,zero,ymm6[16],zero,zero,zero,zero,zero,zero,ymm6[17],zero,zero,zero,zero,zero,zero,ymm6[18]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm29
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm5[0,1,14],zero,ymm5[12,13,0,1,14,15],zero,ymm5[3,12,13,2,3,16],zero,ymm5[30,31,28,29,16,17],zero,ymm5[31,18,19,28,29,18],zero
-; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,ymm14[14],zero,zero,zero,zero,zero,zero,ymm14[15],zero,zero,zero,zero,zero,zero,ymm14[16],zero,zero,zero,zero,zero,zero,ymm14[17],zero,zero,zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm31, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm11, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm11, %ymm30
-; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm7, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [4,4,5,5,4,4,5,5,20,21,22,23,20,21,22,23]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm8[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm2
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm0, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm3, %xmm3
+; AVX512F-SLOW-NEXT:    vporq %xmm2, %xmm3, %xmm22
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm3, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm19
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm4, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm17
+; AVX512F-SLOW-NEXT:    vpor %xmm2, %xmm3, %xmm2
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm8, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm7[21],zero,ymm7[19],zero,zero,zero,zero,ymm7[22],zero,ymm7[20],zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm7, %ymm8
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm13, %ymm9
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm13, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm13[18],zero,zero,zero,zero,ymm13[21],zero,ymm13[19],zero,zero,zero,zero,ymm13[22],zero,ymm13[20]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm12, %ymm15
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm12[23],zero,ymm12[21,22,23,26],zero,ymm12[24],zero,ymm12[28,29,26,27]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm12[18,19,20,21],zero,ymm12[19],zero,ymm12[25,26,27,22],zero,ymm12[20],zero
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm7, %xmm14
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm2
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm8, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm3
+; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm9, %ymm3
+; AVX512F-SLOW-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm2
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm11, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm11, %ymm15
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm3
+; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm6, %ymm3
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm6, %ymm13
+; AVX512F-SLOW-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm2
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm10, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm25
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm3
+; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm12, %ymm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm29
+; AVX512F-SLOW-NEXT:    vpor %ymm2, %ymm3, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm2
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm16
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm7, %xmm2
-; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm2, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm28, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm4, %xmm2
-; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm2, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = <u,1,u,1,u,0,0,u,16,u,16,u,18,19,u,17>
-; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[1,1,0,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpermi2d %zmm2, %zmm3, %zmm21
-; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm2
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm7, %xmm0
+; AVX512F-SLOW-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm11
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm11, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm21, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm8, %xmm4
+; AVX512F-SLOW-NEXT:    vpor %xmm1, %xmm4, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm24, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm2, %xmm12
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm3
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm3, %xmm13
-; AVX512F-SLOW-NEXT:    vpor %xmm12, %xmm13, %xmm1
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm2, %xmm5
+; AVX512F-SLOW-NEXT:    vpor %xmm1, %xmm5, %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm12 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm11, %xmm12, %xmm12
-; AVX512F-SLOW-NEXT:    vshufi64x2 $64, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm1 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm1 = zmm12[0,1,0,1],mem[0,1,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; AVX512F-SLOW-NEXT:    vpandn %ymm0, %ymm12, %ymm12
-; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %ymm6
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [128,13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm13
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm28
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm19
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm15[30],zero,ymm15[28],zero,zero,zero,zero,ymm15[31],zero,ymm15[29],zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm25
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm8[30],zero,ymm8[28],zero,zero,zero,zero,ymm8[31],zero,ymm8[29],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm1
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm20
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm5[0,1,0,1],zmm1[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,5,6]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
+; AVX512F-SLOW-NEXT:    vpandn %ymm1, %ymm9, %ymm9
+; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = zero,ymm0[13],zero,zero,zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,ymm0[17],zero,zero
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm9, %zmm21
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero,ymm0[29],zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm30
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u>
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm1, %ymm13
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm26
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm15 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[27],zero,zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm13, %ymm15
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm12
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm13 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3],xmm10[4],xmm12[4],xmm10[5],xmm12[5],xmm10[6],xmm12[6],xmm10[7],xmm12[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm13[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm15[0,1,2,3],zmm13[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm12
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm28
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[27],zero,zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm12, %ymm10
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm9
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm1
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm10[0,1,2,3],zmm12[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm10 = ymm13[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm13, %ymm26
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm10[2,2,3,3,6,6,7,7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = <9,u,7,u,u,u,u,10,u,8,u,u,u,u,11,u,9,u,7,u,u,u,u,10,u,8,u,u,u,u,11,u>
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm15, %ymm3
+; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm15, %ymm15
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm15, %zmm23
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm13[30],zero,ymm13[28],zero,zero,zero,zero,ymm13[31],zero,ymm13[29],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm18
+; AVX512F-SLOW-NEXT:    vpshufb %ymm12, %ymm13, %ymm12
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm10[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm13[0,0,1,1,4,4,5,5]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm24
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm8, %xmm20
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm13 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm15 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm22
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm23
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm15 = xmm11[8],xmm8[8],xmm11[9],xmm8[9],xmm11[10],xmm8[10],xmm11[11],xmm8[11],xmm11[12],xmm8[12],xmm11[13],xmm8[13],xmm11[14],xmm8[14],xmm11[15],xmm8[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm15, %xmm2
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm13, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm2[0,1,0,1],zmm0[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm0
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm27
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm13 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm15 = xmm14[8],xmm7[8],xmm14[9],xmm7[9],xmm14[10],xmm7[10],xmm14[11],xmm7[11],xmm14[12],xmm7[12],xmm14[13],xmm7[13],xmm14[14],xmm7[14],xmm14[15],xmm7[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm14, %xmm16
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm15 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm2
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm13 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm15, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm13, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm2[0,1,0,1],zmm0[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm13 = xmm10[8],xmm12[8],xmm10[9],xmm12[9],xmm10[10],xmm12[10],xmm10[11],xmm12[11],xmm10[12],xmm12[12],xmm10[13],xmm12[13],xmm10[14],xmm12[14],xmm10[15],xmm12[15]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm11, %xmm13, %xmm13
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm13, %xmm12
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm12, %ymm31
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm15, %xmm0
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm22, %zmm22
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm15 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm15, %xmm15
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm13[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm15[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm9, %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = <11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u>
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm0, %ymm9
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm14, %ymm18
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm10[23],zero,ymm10[21,22,23,26],zero,ymm10[24],zero,ymm10[28,29,26,27]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm10[18,19,20,21],zero,ymm10[19],zero,ymm10[25,26,27,22],zero,ymm10[20],zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20],zero,zero
+; AVX512F-SLOW-NEXT:    vpshufb %ymm15, %ymm0, %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm5
-; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm0, %ymm13
+; AVX512F-SLOW-NEXT:    vpshufb %ymm15, %ymm0, %ymm13
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm20
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm0[23],zero,ymm0[21,22,23,26],zero,ymm0[24],zero,ymm0[28,29,26,27]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm0[18,19,20,21],zero,ymm0[19],zero,ymm0[25,26,27,22],zero,ymm0[20],zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm19
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22],zero,ymm3[20],zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm17
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm15
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm15, %ymm9
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm6, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm6[25],zero,ymm6[23],zero,zero,zero,zero,ymm6[26],zero,ymm6[24],zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm15, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31>
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm25, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm0, %ymm3
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm4, %ymm15
+; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm4, %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm29
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm15, %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm28
+; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = zero,ymm0[13],zero,zero,zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,ymm0[17],zero,zero
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm15 = ymm15[0,1,1,3,4,5,5,7]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,3,2]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; AVX512F-SLOW-NEXT:    vpandnq %ymm15, %ymm29, %ymm15
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm13, %zmm29
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm30, %ymm11
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm11, %ymm10
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm11, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm30
-; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm0, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm31
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25,128,23,128,23,24,25,26,128,24,128,30,31>
-; AVX512F-SLOW-NEXT:    vmovdqu (%rsp), %ymm11 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %ymm15, %ymm11, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm11, %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm28
-; AVX512F-SLOW-NEXT:    vpshufb %ymm15, %ymm3, %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm26
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm27
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm25[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm0 = zmm0[0,1,0,1],mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm3, %xmm0, %xmm15
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm20
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm3 = mem[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshufhw $190, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm13 = mem[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm13 = ymm13[2,2,3,3,6,6,7,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm2[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm10[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm1[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm18[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm14[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw $233, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm14 = mem[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[0,0,1,1,4,4,5,5]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm11
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3],xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm11, %xmm11
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm1, %ymm4, %ymm5
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm5, %zmm4
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm13[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm5
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
+; AVX512F-SLOW-NEXT:    vpandnq %ymm15, %ymm25, %ymm15
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm6, %zmm6
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3],xmm7[4],xmm2[4],xmm7[5],xmm2[5],xmm7[6],xmm2[6],xmm7[7],xmm2[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm5, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm7, %xmm7
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm25 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm1[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm30[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm8[0],xmm11[0],xmm8[1],xmm11[1],xmm8[2],xmm11[2],xmm8[3],xmm11[3],xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm14, %xmm14
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm18[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm10[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,2,3,3,6,6,7,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm9[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm3[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm13[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm12[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm20[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm19[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm17[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm1
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,1,1,4,4,5,5]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm2, %xmm2
+; AVX512F-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm4 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vporq %zmm4, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
+; AVX512F-SLOW-NEXT:    vpand %ymm4, %ymm11, %ymm8
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm9, %zmm8
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm9 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vporq %zmm9, %zmm8, %zmm8
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm9 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm11 = zmm23[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm11
+; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm11
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm8, %zmm11
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm0 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm9 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vporq %zmm0, %zmm9, %zmm0
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm9 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm17 = zmm24[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm17
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm8, %zmm17
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm27[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm9 = zmm22[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm8, %zmm9
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm4, %ymm16, %ymm15
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm1[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpternlogq $236, %ymm4, %ymm10, %ymm0
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm15, %zmm1
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm7[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm18, %ymm4
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm5, %zmm3
-; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm12, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm0
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm14[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $236, %ymm1, %ymm2, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm5, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm4
-; AVX512F-SLOW-NEXT:    vpand %ymm1, %ymm9, %ymm0
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm7, %zmm0
-; AVX512F-SLOW-NEXT:    vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm0
-; AVX512F-SLOW-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm16, %ymm1
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm10, %zmm1
-; AVX512F-SLOW-NEXT:    vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm0, %zmm4, %zmm1
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm24, %zmm4, %zmm17
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm5
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm5, %zmm4, %zmm0
-; AVX512F-SLOW-NEXT:    vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm4 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm4 = zmm11[0,1,0,1],mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm5 # 16-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # xmm5 = xmm7[0],mem[0],xmm7[1],mem[1],xmm7[2],mem[2],xmm7[3],mem[3],xmm7[4],mem[4],xmm7[5],mem[5],xmm7[6],mem[6],xmm7[7],mem[7]
-; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm8 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm25 = ymm9[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm1, %zmm5, %zmm4
+; AVX512F-SLOW-NEXT:    vpor %ymm13, %ymm12, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm5, %zmm0
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm20 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm31[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm5 = mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # xmm8 = xmm8[0],mem[0],xmm8[1],mem[1],xmm8[2],mem[2],xmm8[3],mem[3],xmm8[4],mem[4],xmm8[5],mem[5],xmm8[6],mem[6],xmm8[7],mem[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rax), %xmm10
-; AVX512F-SLOW-NEXT:    vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm5 = zmm5[0,1,0,1],mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm10[1,1,0,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm11[0,1,2,0]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm10[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,1,0]
+; AVX512F-SLOW-NEXT:    vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm8 = zmm8[0,1,0,1],mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm1[1,1,0,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[0,1,2,0]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = [4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm1, %xmm14
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm12, %zmm12
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm10[1,1,0,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm14 = xmm14[0,1,2,0]
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm15 = mem[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm29[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm18 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm18 = mem[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm19 = ymm28[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm3[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,1,1,3,4,5,5,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm10, %xmm13
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,1,0]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,5,5,6]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[2,2,3,3]
-; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm13 = mem[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm28[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm30[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm26[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm31[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm23 = ymm27[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm6[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,1,1,3,4,5,5,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm8
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm19
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm19
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm11[0,0,1,0]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm25, %zmm7
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm7
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm7
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm8
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm10[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm12, %zmm1
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm1
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm22 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm22 = mem[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm24 = mem[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm5 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm5
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm21
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm21
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm14[0,0,1,0]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm7, %zmm2
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm2
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm2
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm4 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm4
+; AVX512F-SLOW-NEXT:    vporq %ymm15, %ymm16, %ymm5
+; AVX512F-SLOW-NEXT:    vporq %ymm18, %ymm19, %ymm7
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm7[0,1,2,3],zmm5[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,3,2]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm1
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm17, %zmm1
-; AVX512F-SLOW-NEXT:    vpor %ymm13, %ymm14, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm10[0,1,2,3],zmm3[0,1,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm29
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm29
-; AVX512F-SLOW-NEXT:    vporq %ymm15, %ymm16, %ymm2
-; AVX512F-SLOW-NEXT:    vporq %ymm20, %ymm23, %ymm3
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm3[0,1,2,3],zmm2[0,1,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm6[2,2,3,2]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm3, %zmm3
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm3
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm3
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm4
-; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm21, %zmm5
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm5
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm10[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm13, %zmm3
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm3
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm3
+; AVX512F-SLOW-NEXT:    vporq %ymm22, %ymm24, %ymm5
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm7[0,1,2,3],zmm5[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm6
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm25[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm5 = zmm20[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm5
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm12[0,0,1,0,4,4,5,4]
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm0
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm0
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm3, 128(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm29, 320(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm1, 256(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm8, 384(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm7, 192(%rax)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm19, 64(%rax)
-; AVX512F-SLOW-NEXT:    addq $1640, %rsp # imm = 0x668
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, (%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm6, 320(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm3, 256(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm1, 128(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, 384(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, 192(%rax)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm21, 64(%rax)
+; AVX512F-SLOW-NEXT:    addq $1368, %rsp # imm = 0x558
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
 ; AVX512F-FAST-LABEL: store_i8_stride7_vf64:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    subq $1560, %rsp # imm = 0x618
+; AVX512F-FAST-NEXT:    subq $1432, %rsp # imm = 0x598
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
-; AVX512F-FAST-NEXT:    vmovdqa %ymm2, %ymm14
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm1
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vmovdqa %ymm2, %ymm4
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[2,3,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm17
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm15
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm15[30],zero,ymm15[28],zero,zero,zero,zero,ymm15[31],zero,ymm15[29],zero,zero,zero
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm15, %ymm17
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm2, %ymm10
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[2,3,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm3
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm8
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29],zero,zero,zero
+; AVX512F-FAST-NEXT:    vmovdqa %ymm3, %ymm10
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm8[23],zero,zero,zero,zero,ymm8[26],zero,ymm8[24],zero,zero,zero,zero,ymm8[27],zero,ymm8[25]
+; AVX512F-FAST-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %ymm2
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm1
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm1, %ymm7
+; AVX512F-FAST-NEXT:    vmovdqa %ymm1, %ymm15
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %ymm2
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm22
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[2,3,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa %ymm2, %ymm6
+; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rax), %ymm1
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[2,3,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -7646,382 +7669,392 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,14],zero,ymm1[12,13,0,1,14,15],zero,ymm1[3,12,13,2,3,16],zero,ymm1[30,31,28,29,16,17],zero,ymm1[31,18,19,28,29,18],zero
 ; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm13
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm13, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm27
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm28
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm1, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm24
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm16
 ; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm1, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm25
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <13,u,u,u,u,u,128,14,u,u,u,u,u,128,15,u,u,u,u,u,128,16,u,u,u,u,u,128,17,u,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm26
-; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm31
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm11, %xmm20
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm12
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
-; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm12, %xmm3
-; AVX512F-FAST-NEXT:    vporq %xmm2, %xmm3, %xmm21
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %xmm15
-; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
-; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm15, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm15, %xmm29
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
-; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm0, %xmm16
-; AVX512F-FAST-NEXT:    vpor %xmm2, %xmm3, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm4, %ymm24
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm28, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm14, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm14, %ymm23
-; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm15
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,ymm15[14],zero,zero,zero,zero,zero,zero,ymm15[15],zero,zero,zero,zero,zero,zero,ymm15[16],zero,zero,zero,zero,zero,zero,ymm15[17],zero,zero,zero,zero,zero,zero,ymm15[18]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm10[0,1,14],zero,ymm10[12,13,0,1,14,15],zero,ymm10[3,12,13,2,3,16],zero,ymm10[30,31,28,29,16,17],zero,ymm10[31,18,19,28,29,18],zero
-; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,ymm7[14],zero,zero,zero,zero,zero,zero,ymm7[15],zero,zero,zero,zero,zero,zero,ymm7[16],zero,zero,zero,zero,zero,zero,ymm7[17],zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm22, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqu64 %ymm22, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm3
-; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm2
-; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm2, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm19
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm18
-; AVX512F-FAST-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm20, %xmm0
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm4
-; AVX512F-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[13,u,u,u,u,u],zero,ymm1[14,u,u,u,u,u],zero,ymm1[15,u,u,u,u,u],zero,ymm1[16,u,u,u,u,u],zero,ymm1[17,u,u,u]
+; AVX512F-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm22
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm23
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm30
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm3, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm27
 ; AVX512F-FAST-NEXT:    vpor %xmm0, %xmm1, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa %xmm1, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm5
+; AVX512F-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm20
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm5, %xmm3
+; AVX512F-FAST-NEXT:    vporq %xmm0, %xmm3, %xmm21
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r9), %xmm12
+; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %xmm11
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
+; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm12, %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm12, %xmm29
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm11, %xmm5
+; AVX512F-FAST-NEXT:    vpor %xmm3, %xmm5, %xmm3
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm3, %ymm3
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm16, %ymm5
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm12
+; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm12, %ymm5
+; AVX512F-FAST-NEXT:    vpor %ymm3, %ymm5, %ymm3
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,ymm10[14],zero,zero,zero,zero,zero,zero,ymm10[15],zero,zero,zero,zero,zero,zero,ymm10[16],zero,zero,zero,zero,zero,zero,ymm10[17],zero,zero,zero,zero,zero,zero,ymm10[18]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm10, %ymm18
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm8[0,1,14],zero,ymm8[12,13,0,1,14,15],zero,ymm8[3,12,13,2,3,16],zero,ymm8[30,31,28,29,16,17],zero,ymm8[31,18,19,28,29,18],zero
+; AVX512F-FAST-NEXT:    vpor %ymm3, %ymm5, %ymm3
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm15, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm5 = ymm6[13,u,u,u,u,u],zero,ymm6[14,u,u,u,u,u],zero,ymm6[15,u,u,u,u,u],zero,ymm6[16,u,u,u,u,u],zero,ymm6[17,u,u,u]
+; AVX512F-FAST-NEXT:    vpor %ymm3, %ymm5, %ymm3
+; AVX512F-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm14
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm14, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm3
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm16
+; AVX512F-FAST-NEXT:    vpor %xmm1, %xmm4, %xmm1
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm3
-; AVX512F-FAST-NEXT:    vpshufb %xmm11, %xmm3, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-FAST-NEXT:    vpor %xmm0, %xmm1, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm10
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm23, %xmm1
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm10, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm9
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm9, %xmm4
+; AVX512F-FAST-NEXT:    vpor %xmm1, %xmm4, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa (%r9), %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm2, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa %xmm2, %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %xmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa %xmm2, %xmm5
+; AVX512F-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa %ymm13, %ymm8
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm13[25],zero,ymm13[23],zero,zero,zero,zero,ymm13[26],zero,ymm13[24],zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm13[18],zero,zero,zero,zero,ymm13[21],zero,ymm13[19],zero,zero,zero,zero,ymm13[22],zero,ymm13[20]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[2,3,2,3]
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero,ymm0[27],zero,ymm0[25]
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm23, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[21],zero,ymm1[19],zero,zero,zero,zero,ymm1[22],zero,ymm1[20],zero,zero
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27>
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm27, %ymm13
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm13, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm13[18,19,20,21],zero,ymm13[19],zero,ymm13[25,26,27,22],zero,ymm13[20],zero
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm27, %ymm10
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[2,3,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm24, %ymm7
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm7[25],zero,ymm7[23],zero,zero,zero,zero,ymm7[26],zero,ymm7[24],zero,zero,zero,zero
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm7[18],zero,zero,zero,zero,ymm7[21],zero,ymm7[19],zero,zero,zero,zero,ymm7[22],zero,ymm7[20]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %ymm20 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm20, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero,ymm0[27],zero,ymm0[25]
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm9[21],zero,ymm9[19],zero,zero,zero,zero,ymm9[22],zero,ymm9[20],zero,zero
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[2,3,2,3],zmm0[2,3,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27>
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm2, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm2[18,19,20,21],zero,ymm2[19],zero,ymm2[25,26,27,22],zero,ymm2[20],zero
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,0,1],zmm31[0,1,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm22, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[4,5,6,7]
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm17 = [2,2,3,3,2,2,3,3]
-; AVX512F-FAST-NEXT:    # ymm17 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [2,2,3,3,2,2,3,3]
+; AVX512F-FAST-NEXT:    # ymm6 = mem[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vmovdqa (%rax), %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm17, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; AVX512F-FAST-NEXT:    vpandn %ymm1, %ymm6, %ymm6
+; AVX512F-FAST-NEXT:    vpermd %ymm1, %ymm6, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
+; AVX512F-FAST-NEXT:    vpandn %ymm1, %ymm4, %ymm4
 ; AVX512F-FAST-NEXT:    vmovdqa (%rax), %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm7
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm28
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm22
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm10[30],zero,ymm10[28],zero,zero,zero,zero,ymm10[31],zero,ymm10[29],zero,zero
-; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u>
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm26, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm7
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm31
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm26
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm4, %zmm22
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm31
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm0[13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u,29,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm25, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm26, %ymm7, %ymm10
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm24, %ymm8, %ymm15
+; AVX512F-FAST-NEXT:    vmovdqa %xmm11, %xmm4
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm29, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm16, %xmm3
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm10[0,1,2,3],zmm7[0,1,0,1]
-; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm6, %xmm30
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm29
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm5, %xmm27
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7>
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm11[0],xmm2[0],xmm11[1],xmm2[1],xmm11[2],xmm2[2],xmm11[3],xmm2[3],xmm11[4],xmm2[4],xmm11[5],xmm2[5],xmm11[6],xmm2[6],xmm11[7],xmm2[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm29 = zmm15[0,1,2,3],zmm8[0,1,0,1]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm27, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm30, %xmm1
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm28
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm9, %xmm27
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm10, %xmm25
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7>
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm10, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[0,1,0,1]
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa %xmm12, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm12[8],xmm5[8],xmm12[9],xmm5[9],xmm12[10],xmm5[10],xmm12[11],xmm5[11],xmm12[12],xmm5[12],xmm12[13],xmm5[13],xmm12[14],xmm5[14],xmm12[15],xmm5[15]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm19, %xmm4
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm18, %xmm5
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
+; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm20, %xmm11
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3],xmm0[4],xmm11[4],xmm0[5],xmm11[5],xmm0[6],xmm11[6],xmm0[7],xmm11[7]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm11[8],xmm0[8],xmm11[9],xmm0[9],xmm11[10],xmm0[10],xmm11[11],xmm0[11],xmm11[12],xmm0[12],xmm11[13],xmm0[13],xmm11[14],xmm0[14],xmm11[15],xmm0[15]
+; AVX512F-FAST-NEXT:    vmovdqa %xmm14, %xmm8
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm16, %xmm5
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm14[8],xmm5[8],xmm14[9],xmm5[9],xmm14[10],xmm5[10],xmm14[11],xmm5[11],xmm14[12],xmm5[12],xmm14[13],xmm5[13],xmm14[14],xmm5[14],xmm14[15],xmm5[15]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm10, %xmm0
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm21[0,1,0,1],zmm0[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm8, %ymm13
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm24, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm1[2,3,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm23, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm11
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm24 = ymm11[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm0
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm21, %zmm30
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u,13,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm7, %ymm11
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm3
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm21 = ymm3[2,3,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm19, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm3, %ymm10
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm10[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,0,1],zmm0[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rax), %xmm11
-; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm11[0,1,2,3,4,5,5,6]
-; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm17, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rax), %xmm10
+; AVX512F-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm10[0,1,2,3,4,5,5,6]
+; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm6, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[18],zero,zero,zero,zero,ymm3[21],zero,ymm3[19],zero,zero,zero,zero,ymm3[22],zero,ymm3[20]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm15[9,u,7,u,u,u,u,10,u,8,u,u,u,u,11,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm23 = ymm8[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm1[18,19,20,21],zero,ymm1[19],zero,ymm1[25,26,27,22],zero,ymm1[20],zero
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm15[21],zero,ymm15[19],zero,zero,zero,zero,ymm15[22],zero,ymm15[20],zero,zero
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm18
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[18],zero,zero,zero,zero,ymm1[21],zero,ymm1[19],zero,zero,zero,zero,ymm1[22],zero,ymm1[20]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm18, %ymm0
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm0[9,u,7,u,u,u,u,10,u,8,u,u,u,u,11,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm7[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm3[18,19,20,21],zero,ymm3[19],zero,ymm3[25,26,27,22],zero,ymm3[20],zero
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm16
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20],zero,zero
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm28, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm8
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm26, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm7
 ; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,5,4,5,5,7,4,5]
 ; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm1, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; AVX512F-FAST-NEXT:    vpandnq %ymm0, %ymm28, %ymm0
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm8
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29>
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm3, %ymm15
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm15[2,3,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
+; AVX512F-FAST-NEXT:    vpandnq %ymm0, %ymm26, %ymm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm7, %zmm7
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29>
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm15, %ymm13
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm13[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm31, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm14
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm31
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm28 = ymm14[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5>
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm6, %xmm6
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm14, %xmm0
-; AVX512F-FAST-NEXT:    vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    # zmm0 = zmm0[0,1,0,1],mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm0[13,u,11,u,u,u,u,14,u,12,u,u,u,u,15,u,29,u,27,u,u,u,u,30,u,28,u,u,u,u,31,u]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm18 = ymm14[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3],xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5>
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm9, %xmm9
+; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 16-byte Folded Reload
 ; AVX512F-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm0[23],zero,ymm0[23,24,25,26],zero,ymm0[24],zero,ymm0[30,31]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm0[2,3,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa %ymm9, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm9[30],zero,ymm9[28],zero,zero,zero,zero,ymm9[31],zero,ymm9[29],zero,zero,zero
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm20, %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm4
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm0[23],zero,ymm0[23,24,25,26],zero,ymm0[24],zero,ymm0[30,31]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm0, %ymm26
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm1[2,3,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm23, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm2[30],zero,ymm2[28],zero,zero,zero,zero,ymm2[31],zero,ymm2[29],zero,zero,zero
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm0[2,3,2,3]
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm4
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm13[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm0 = mem[2,3,2,3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm29, %xmm5
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm27, %xmm7
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3],xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm30, %xmm5
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm5, %xmm15
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm31[2,3,2,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm27, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm25, %xmm5
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm28, %xmm5
+; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm5, %xmm14
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm18[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm7, %xmm7
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23>
-; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm3, %ymm13
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX512F-FAST-NEXT:    vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    # zmm7 = zmm7[0,1,0,1],mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm1[9,u,7,u,u,u,u,10,u,8,u,u,u,u,11,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm16[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpshufb %ymm9, %ymm2, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23>
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm15, %ymm15
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm23 # 16-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm2[9,u,7,u,u,u,u,10,u,8,u,u,u,u,11,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm1, %ymm2
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpor %ymm4, %ymm14, %ymm4
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm6, %ymm20, %ymm0
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm0, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm9, %zmm1
-; AVX512F-FAST-NEXT:    vpor %ymm12, %ymm10, %ymm4
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
-; AVX512F-FAST-NEXT:    vpor %ymm5, %ymm13, %ymm5
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm9, %zmm5
-; AVX512F-FAST-NEXT:    vpandq %ymm6, %ymm25, %ymm4
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm24, %zmm4
-; AVX512F-FAST-NEXT:    vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpandq %ymm26, %ymm23, %ymm6
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm6, %zmm6
-; AVX512F-FAST-NEXT:    vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm6
-; AVX512F-FAST-NEXT:    vpandq %ymm26, %ymm28, %ymm4
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm17, %zmm4
-; AVX512F-FAST-NEXT:    vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm9, %zmm4
-; AVX512F-FAST-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm21 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm6 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpandq %ymm26, %ymm3, %ymm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512F-FAST-NEXT:    vporq {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm2 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm9, %zmm2
+; AVX512F-FAST-NEXT:    vpor %ymm6, %ymm4, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm1, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm4, %ymm11, %ymm31
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm31, %zmm9
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm11, %zmm9
+; AVX512F-FAST-NEXT:    vpor %ymm5, %ymm12, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
+; AVX512F-FAST-NEXT:    vpor %ymm3, %ymm15, %ymm3
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm5, %zmm3
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm11, %zmm3
+; AVX512F-FAST-NEXT:    vpandq %ymm4, %ymm21, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm20, %zmm1
+; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm4 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vporq %zmm4, %zmm1, %zmm1
+; AVX512F-FAST-NEXT:    vpandq %ymm24, %ymm19, %ymm4
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm4, %zmm4
+; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm5 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vporq %zmm5, %zmm4, %zmm4
+; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm4
+; AVX512F-FAST-NEXT:    vpandq %ymm24, %ymm18, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm13, %zmm1
+; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm5 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vporq %zmm5, %zmm1, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
+; AVX512F-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm5, %zmm1
+; AVX512F-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm4 = mem[0,1,0,1,4,5,4,5]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm11 = zmm30[0,1,0,1,4,5,4,5]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm5, %zmm11
+; AVX512F-FAST-NEXT:    vpandq %ymm24, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm2 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vporq %zmm2, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm2 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm4 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vporq %zmm2, %zmm4, %zmm2
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm0, %zmm5, %zmm2
 ; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm3 # 16-byte Folded Reload
-; AVX512F-FAST-NEXT:    # xmm3 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512F-FAST-NEXT:    vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm18 # 64-byte Folded Reload
-; AVX512F-FAST-NEXT:    # zmm18 = zmm3[0,1,0,1],mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm11[1,1,0,0,4,5,6,7]
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [0,1,0,1,2,0,0,1]
-; AVX512F-FAST-NEXT:    vpermd %ymm6, %ymm9, %ymm19
+; AVX512F-FAST-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512F-FAST-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512F-FAST-NEXT:    vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm16 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm16 = zmm0[0,1,0,1],mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm10[1,1,0,0,4,5,6,7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [0,1,0,1,2,0,0,1]
+; AVX512F-FAST-NEXT:    vpermd %ymm4, %ymm5, %ymm18
 ; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm0[1,1,0,0,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpermd %ymm10, %ymm9, %ymm17
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = [4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vpshufb %xmm10, %xmm0, %xmm10
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22>
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm31, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm0, %ymm13
-; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[25],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero
-; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm0, %ymm12
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128>
+; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm0[1,1,0,0,4,5,6,7]
+; AVX512F-FAST-NEXT:    vpermd %ymm12, %ymm5, %ymm17
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm10
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm12
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22>
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm0, %ymm14
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm0[23],zero,ymm0[23,24,25,26],zero,ymm0[24],zero,ymm0[30,31]
-; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm0, %ymm15
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[25],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm0, %ymm13
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128>
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm26, %ymm4
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm5
+; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm6[23],zero,ymm6[23,24,25,26],zero,ymm6[24],zero,ymm6[30,31]
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm6, %ymm8
 ; AVX512F-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-FAST-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [4,5,4,5,5,7,4,5]
-; AVX512F-FAST-NEXT:    vpermd %ymm6, %ymm20, %ymm20
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [4,5,4,5,5,7,4,5]
+; AVX512F-FAST-NEXT:    vpermd %ymm6, %ymm19, %ymm19
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm24
-; AVX512F-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm23 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm20 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm20
+; AVX512F-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm21 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm21 = mem[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,1,0]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,1,0]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,1,0]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4, %zmm4 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm23 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm23
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm22
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm23, %zmm22
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm6, %zmm4
-; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm16
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm16
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm1 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm1
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm21, %zmm1
-; AVX512F-FAST-NEXT:    vpor %ymm3, %ymm13, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm4[0,1,2,3],zmm3[0,1,2,3]
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm8
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm8
-; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21, %zmm21 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm21
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm22
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm21, %zmm22
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm6, %zmm1
+; AVX512F-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm29
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm29
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm1 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm1
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm1
+; AVX512F-FAST-NEXT:    vpor %ymm5, %ymm14, %ymm5
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
+; AVX512F-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm5[4,5,6,7]
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm7
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm7
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm17, %zmm3
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm3
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm3
-; AVX512F-FAST-NEXT:    vpor %ymm14, %ymm9, %ymm4
-; AVX512F-FAST-NEXT:    vpor %ymm12, %ymm15, %ymm5
-; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm5[0,1,2,3],zmm4[0,1,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm20, %zmm0
+; AVX512F-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Folded Reload
+; AVX512F-FAST-NEXT:    # zmm3 = mem[0,1,0,1,4,5,4,5]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm5 = zmm23[0,1,0,1,4,5,4,5]
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm5
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm17, %zmm3
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm3
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm3
+; AVX512F-FAST-NEXT:    vpor %ymm4, %ymm15, %ymm4
+; AVX512F-FAST-NEXT:    vpor %ymm13, %ymm8, %ymm5
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm4
+; AVX512F-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm5[0,1,2,3],zmm4[4,5,6,7]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm19, %zmm0
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm0
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
 ; AVX512F-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, 128(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, 320(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm7, 320(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 256(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm16, 192(%rax)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm29, 192(%rax)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm22, 64(%rax)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm24, 384(%rax)
-; AVX512F-FAST-NEXT:    addq $1560, %rsp # imm = 0x618
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm20, 384(%rax)
+; AVX512F-FAST-NEXT:    addq $1432, %rsp # imm = 0x598
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -8265,15 +8298,17 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vpor %xmm2, %xmm6, %xmm2
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,0,1],zmm3[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm2, %zmm2
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
 ; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm15, %xmm4
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
 ; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm14, %xmm7
 ; AVX512BW-SLOW-NEXT:    vpor %xmm4, %xmm7, %xmm4
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm15[8],xmm14[8],xmm15[9],xmm14[9],xmm15[10],xmm14[10],xmm15[11],xmm14[11],xmm15[12],xmm14[12],xmm15[13],xmm14[13],xmm15[14],xmm14[14],xmm15[15],xmm14[15]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm2[0,1,0,1,4,5,4,5]
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,0,1],zmm7[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm7, %zmm4, %zmm4
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm4 = zmm4[0,1,0,1,4,5,4,5]
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm2, %zmm4 {%k2}
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
 ; AVX512BW-SLOW-NEXT:    vpshufb %xmm2, %xmm26, %xmm7
@@ -8284,7 +8319,8 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vpor %xmm7, %xmm12, %xmm7
 ; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm12 = xmm27[8],xmm26[8],xmm27[9],xmm26[9],xmm27[10],xmm26[10],xmm27[11],xmm26[11],xmm27[12],xmm26[12],xmm27[13],xmm26[13],xmm27[14],xmm26[14],xmm27[15],xmm26[15]
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm7[0,1,0,1],zmm12[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm12, %zmm7, %zmm7
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm7 = zmm7[0,1,0,1,4,5,4,5]
 ; AVX512BW-SLOW-NEXT:    movabsq $290499906672591364, %rax # imm = 0x408102040810204
 ; AVX512BW-SLOW-NEXT:    kmovq %rax, %k1
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm10, %zmm7 {%k1}
@@ -8296,13 +8332,15 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vpor %xmm1, %xmm5, %xmm1
 ; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm29[0],xmm23[0],xmm29[1],xmm23[1],xmm29[2],xmm23[2],xmm29[3],xmm23[3],xmm29[4],xmm23[4],xmm29[5],xmm23[5],xmm29[6],xmm23[6],xmm29[7],xmm23[7]
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm5[0,1,0,1],zmm1[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm5, %zmm1
 ; AVX512BW-SLOW-NEXT:    vpshufb %xmm3, %xmm16, %xmm3
 ; AVX512BW-SLOW-NEXT:    vpshufb %xmm6, %xmm22, %xmm5
 ; AVX512BW-SLOW-NEXT:    vpor %xmm3, %xmm5, %xmm3
 ; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm22[0],xmm16[0],xmm22[1],xmm16[1],xmm22[2],xmm16[2],xmm22[3],xmm16[3],xmm22[4],xmm16[4],xmm22[5],xmm16[5],xmm22[6],xmm16[6],xmm22[7],xmm16[7]
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm5[0,1,0,1],zmm3[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm5, %zmm3
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[0,1,0,1,4,5,4,5]
 ; AVX512BW-SLOW-NEXT:    movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
 ; AVX512BW-SLOW-NEXT:    kmovq %rax, %k1
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm1, %zmm3 {%k1}
@@ -8313,9 +8351,10 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vpor %xmm1, %xmm2, %xmm1
 ; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
 ; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512BW-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm2[0,1,0,1],zmm1[0,1,0,1]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm1
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
 ; AVX512BW-SLOW-NEXT:    vpermw %zmm0, %zmm2, %zmm0
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
 ; AVX512BW-SLOW-NEXT:    movabsq $4647998506761461824, %rax # imm = 0x4081020408102040
 ; AVX512BW-SLOW-NEXT:    kmovq %rax, %k1
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm1 {%k1}
@@ -8324,8 +8363,8 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-SLOW-NEXT:    vmovdqu8 %zmm1, %zmm3 {%k1}
 ; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm4, 256(%rax)
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm11, 320(%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm4, 256(%rax)
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm17, 128(%rax)
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm8, 64(%rax)
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm25, 384(%rax)
@@ -8335,12 +8374,12 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512BW-FAST-LABEL: store_i8_stride7_vf64:
 ; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    subq $152, %rsp
+; AVX512BW-FAST-NEXT:    subq $200, %rsp
 ; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm5
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rsi), %zmm3
-; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm3, (%rsp) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %zmm2
 ; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rax), %ymm6
@@ -8352,36 +8391,36 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    # ymm4 = mem[0,1,0,1]
 ; AVX512BW-FAST-NEXT:    vpermw %ymm6, %ymm4, %ymm4
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm6
-; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %ymm10
+; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %ymm14
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm18 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm18, %ymm10, %ymm7
+; AVX512BW-FAST-NEXT:    vpshufb %ymm18, %ymm14, %ymm7
 ; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %ymm1
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
 ; AVX512BW-FAST-NEXT:    vpshufb %ymm20, %ymm1, %ymm8
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %ymm1, %ymm24
 ; AVX512BW-FAST-NEXT:    vpor %ymm7, %ymm8, %ymm7
-; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %xmm1
-; AVX512BW-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%r8), %xmm25
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm25[8],xmm1[8],xmm25[9],xmm1[9],xmm25[10],xmm1[10],xmm25[11],xmm1[11],xmm25[12],xmm1[12],xmm25[13],xmm1[13],xmm25[14],xmm1[14],xmm25[15],xmm1[15]
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%r9), %xmm25
+; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %xmm10
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm10[8],xmm25[8],xmm10[9],xmm25[9],xmm10[10],xmm25[10],xmm10[11],xmm25[11],xmm10[12],xmm25[12],xmm10[13],xmm25[13],xmm10[14],xmm25[14],xmm10[15],xmm25[15]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm22
 ; AVX512BW-FAST-NEXT:    movabsq $2323999253380730912, %r10 # imm = 0x2040810204081020
 ; AVX512BW-FAST-NEXT:    kmovq %r10, %k1
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm6, %zmm22 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %ymm15
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %ymm1
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm21, %ymm15, %ymm8
+; AVX512BW-FAST-NEXT:    vpshufb %ymm21, %ymm1, %ymm8
+; AVX512BW-FAST-NEXT:    vmovdqa %ymm1, %ymm6
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %ymm1
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
 ; AVX512BW-FAST-NEXT:    vpshufb %ymm23, %ymm1, %ymm11
 ; AVX512BW-FAST-NEXT:    vmovdqa %ymm1, %ymm7
 ; AVX512BW-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512BW-FAST-NEXT:    vpor %ymm8, %ymm11, %ymm8
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm14
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm15
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %xmm16
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm16[8],xmm14[8],xmm16[9],xmm14[9],xmm16[10],xmm14[10],xmm16[11],xmm14[11],xmm16[12],xmm14[12],xmm16[13],xmm14[13],xmm16[14],xmm14[14],xmm16[15],xmm14[15]
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm16[8],xmm15[8],xmm16[9],xmm15[9],xmm16[10],xmm15[10],xmm16[11],xmm15[11],xmm16[12],xmm15[12],xmm16[13],xmm15[13],xmm16[14],xmm15[14],xmm16[15],xmm15[15]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm11, %zmm26
@@ -8445,7 +8484,8 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vporq %ymm20, %ymm23, %ymm20
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %zmm23
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm18, %zmm18
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%r8), %zmm20
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%r8), %zmm9
+; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512BW-FAST-NEXT:    movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
 ; AVX512BW-FAST-NEXT:    kmovq %r10, %k2
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm21, %zmm18 {%k2}
@@ -8476,7 +8516,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm27[0,1,2,3],zmm21[4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm0[23],zero,zmm0[23,24,25,26],zero,zmm0[24],zero,zmm0[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm0[59],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm20[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm9[4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm1[25],zero,zmm1[23],zero,zero,zero,zero,zmm1[26],zero,zmm1[24],zero,zero,zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm1[59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero,zmm1[61]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
 ; AVX512BW-FAST-NEXT:    vporq %zmm0, %zmm1, %zmm0
@@ -8499,7 +8539,8 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm2
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm7[30],zero,ymm7[28],zero,zero,zero,zero,ymm7[31],zero,ymm7[29],zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm27 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm15[30],zero,ymm15[28],zero,zero,zero,zero,ymm15[31],zero,ymm15[29],zero,zero
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm27 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29],zero,zero
+; AVX512BW-FAST-NEXT:    vmovdqa64 %ymm6, %ymm20
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[2,3,2,3]
 ; AVX512BW-FAST-NEXT:    vporq %ymm0, %ymm27, %ymm27
 ; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdx), %xmm31
@@ -8510,7 +8551,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm27, %zmm27
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm2, %zmm27 {%k2}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm10[27],zero,zero,zero,zero,ymm10[30],zero,ymm10[28],zero,zero,zero,zero,ymm10[31],zero
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm24[27],zero,zero,zero,zero,ymm24[30],zero,ymm24[28],zero,zero,zero,zero,ymm24[31],zero,ymm24[29]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %ymm24, %ymm7
@@ -8538,7 +8579,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vporq %xmm2, %xmm24, %xmm2
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm31[8],xmm1[9],xmm31[9],xmm1[10],xmm31[10],xmm1[11],xmm31[11],xmm1[12],xmm31[12],xmm1[13],xmm31[13],xmm1[14],xmm31[14],xmm1[15],xmm31[15]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm2[0,1,0,1],zmm1[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm1
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm2, %xmm30, %xmm24
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm31 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
@@ -8546,7 +8587,9 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vporq %xmm24, %xmm29, %xmm24
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm28 = xmm30[8],xmm28[8],xmm30[9],xmm28[9],xmm30[10],xmm28[10],xmm30[11],xmm28[11],xmm30[12],xmm28[12],xmm30[13],xmm28[13],xmm30[14],xmm28[14],xmm30[15],xmm28[15]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm28 = xmm28[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm24[0,1,0,1],zmm28[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm28, %zmm24, %zmm24
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm24 = zmm24[0,1,0,1,4,5,4,5]
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm24 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm28
@@ -8555,9 +8598,10 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vporq %xmm28, %xmm30, %xmm28
 ; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm28[0,1,0,1],zmm3[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm3, %zmm28, %zmm3
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
 ; AVX512BW-FAST-NEXT:    vpermi2w %zmm26, %zmm12, %zmm4
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[0,1,0,1,4,5,4,5]
 ; AVX512BW-FAST-NEXT:    movabsq $290499906672591364, %rax # imm = 0x408102040810204
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k2
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm4, %zmm3 {%k2}
@@ -8565,27 +8609,29 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k2
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm3, %zmm24 {%k2}
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm5, %xmm16, %xmm3
-; AVX512BW-FAST-NEXT:    vpshufb %xmm0, %xmm14, %xmm0
+; AVX512BW-FAST-NEXT:    vpshufb %xmm0, %xmm15, %xmm0
 ; AVX512BW-FAST-NEXT:    vpor %xmm3, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm14[0],xmm16[0],xmm14[1],xmm16[1],xmm14[2],xmm16[2],xmm14[3],xmm16[3],xmm14[4],xmm16[4],xmm14[5],xmm16[5],xmm14[6],xmm16[6],xmm14[7],xmm16[7]
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm15[0],xmm16[0],xmm15[1],xmm16[1],xmm15[2],xmm16[2],xmm15[3],xmm16[3],xmm15[4],xmm16[4],xmm15[5],xmm16[5],xmm15[6],xmm16[6],xmm15[7],xmm16[7]
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm3[0,1,0,1],zmm0[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm0
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm2, %xmm19, %xmm2
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm31, %xmm17, %xmm3
 ; AVX512BW-FAST-NEXT:    vpor %xmm2, %xmm3, %xmm2
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm17[0],xmm19[0],xmm17[1],xmm19[1],xmm17[2],xmm19[2],xmm17[3],xmm19[3],xmm17[4],xmm19[4],xmm17[5],xmm19[5],xmm17[6],xmm19[6],xmm17[7],xmm19[7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm3[0,1,0,1],zmm2[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm3, %zmm2
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm2 = zmm2[0,1,0,1,4,5,4,5]
 ; AVX512BW-FAST-NEXT:    movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k2
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm2 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX512BW-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm0
-; AVX512BW-FAST-NEXT:    vpshufb %xmm29, %xmm25, %xmm1
+; AVX512BW-FAST-NEXT:    vpshufb %xmm1, %xmm25, %xmm0
+; AVX512BW-FAST-NEXT:    vpshufb %xmm29, %xmm10, %xmm1
 ; AVX512BW-FAST-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm25[0],xmm3[0],xmm25[1],xmm3[1],xmm25[2],xmm3[2],xmm25[3],xmm3[3],xmm25[4],xmm3[4],xmm25[5],xmm3[5],xmm25[6],xmm3[6],xmm25[7],xmm3[7]
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm10[0],xmm25[0],xmm10[1],xmm25[1],xmm10[2],xmm25[2],xmm10[3],xmm25[3],xmm10[4],xmm25[4],xmm10[5],xmm25[5],xmm10[6],xmm25[6],xmm10[7],xmm25[7]
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,1,0,1,4,5,4,5]
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm1 # 32-byte Folded Reload
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
 ; AVX512BW-FAST-NEXT:    vpermw %zmm1, %zmm3, %zmm3
@@ -8597,7 +8643,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm2 {%k2}
 ; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512BW-FAST-NEXT:    vmovdqu64 (%rsp), %zmm3 # 64-byte Reload
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm3, %zmm3
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,zmm0[19],zero,zmm0[21,20,21,22],zero,zmm0[20],zero,zmm0[22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm0[55],zero,zero,zero,zero,zmm0[58],zero,zmm0[56],zero,zero,zero,zero,zmm0[59],zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
@@ -8605,7 +8651,7 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
 ; AVX512BW-FAST-NEXT:    vporq %zmm0, %zmm3, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm3, %zmm3
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm3, %zmm3
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm4 # 32-byte Folded Reload
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zmm3[18,19,20,21],zero,zmm3[19],zero,zmm3[25,26,27,22],zero,zmm3[20],zero,zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57],zero,zmm3[55],zero,zmm3[53,54,55,58],zero,zmm3[56],zero,zmm3[60,61,58,59]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
@@ -8613,8 +8659,9 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm4 = zmm4[2,3,2,3,6,7,6,7]
 ; AVX512BW-FAST-NEXT:    vporq %zmm3, %zmm4, %zmm3
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm3 {%k1}
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm20, %zmm0
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm21, %zmm4
+; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm0
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm21, %zmm4
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm0[18],zero,zmm0[20,21,20,21],zero,zmm0[19],zero,zmm0[19,20,21,22],zero,zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm0[55],zero,zmm0[55,56,57,58],zero,zmm0[56],zero,zmm0[62,63]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm4[20],zero,zmm4[18],zero,zero,zero,zero,zmm4[21],zero,zmm4[19],zero,zero,zero,zero,zmm4[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm4[57],zero,zmm4[55],zero,zero,zero,zero,zmm4[58],zero,zmm4[56],zero,zero
@@ -8630,13 +8677,13 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm3 {%k1}
 ; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm3, 128(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm18, 320(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm24, 256(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm27, 192(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm8, 64(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm22, 384(%rax)
-; AVX512BW-FAST-NEXT:    addq $152, %rsp
+; AVX512BW-FAST-NEXT:    addq $200, %rsp
 ; AVX512BW-FAST-NEXT:    vzeroupper
 ; AVX512BW-FAST-NEXT:    retq
   %in.vec0 = load <64 x i8>, ptr %in.vecptr0, align 64

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
index 69ae0123aa1d7..7a2024c6f908e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
@@ -419,26 +419,26 @@ define void @store_i8_stride8_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX2-SLOW-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX2-SLOW-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
 ; AVX2-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm4
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm5
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,ymm2[0,8],zero,zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,ymm2[18,26],zero,zero,zero,zero,zero,zero,ymm2[19,27],zero,zero
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,ymm5[0,8],zero,zero,zero,zero,zero,zero,ymm5[1,9],zero,zero,zero,zero,zero,zero,zero,zero,ymm5[18,26],zero,zero,zero,zero,zero,zero,ymm5[19,27]
-; AVX2-SLOW-NEXT:    vpor %ymm3, %ymm6, %ymm3
-; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,ymm0[0,8],zero,zero,zero,zero,zero,zero,ymm0[1,9],zero,zero,zero,zero,ymm0[18,26],zero,zero,zero,zero,zero,zero,ymm0[19,27],zero,zero,zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm4[0,8],zero,zero,zero,zero,zero,zero,ymm4[1,9],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[18,26],zero,zero,zero,zero,zero,zero,ymm4[19,27],zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vpor %ymm1, %ymm6, %ymm1
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm1
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,ymm1[0,8],zero,zero,zero,zero,zero,zero,ymm1[1,9],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[18,26],zero,zero,zero,zero,zero,zero,ymm1[19,27]
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,ymm3[0,8],zero,zero,zero,zero,zero,zero,ymm3[1,9],zero,zero,zero,zero,ymm3[18,26],zero,zero,zero,zero,zero,zero,ymm3[19,27],zero,zero
+; AVX2-SLOW-NEXT:    vpor %ymm4, %ymm2, %ymm2
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm0[0,8],zero,zero,zero,zero,zero,zero,ymm0[1,9],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[18,26],zero,zero,zero,zero,zero,zero,ymm0[19,27],zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,ymm5[0,8],zero,zero,zero,zero,zero,zero,ymm5[1,9],zero,zero,zero,zero,ymm5[18,26],zero,zero,zero,zero,zero,zero,ymm5[19,27],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpor %ymm6, %ymm4, %ymm4
+; AVX2-SLOW-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,ymm1[4,12],zero,zero,zero,zero,zero,zero,ymm1[5,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[22,30],zero,zero,zero,zero,zero,zero,ymm1[23,31]
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,ymm3[4,12],zero,zero,zero,zero,zero,zero,ymm3[5,13],zero,zero,zero,zero,ymm3[22,30],zero,zero,zero,zero,zero,zero,ymm3[23,31],zero,zero
 ; AVX2-SLOW-NEXT:    vpor %ymm3, %ymm1, %ymm1
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,ymm2[4,12],zero,zero,zero,zero,zero,zero,ymm2[5,13],zero,zero,zero,zero,ymm2[22,30],zero,zero,zero,zero,zero,zero,ymm2[23,31],zero,zero
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,ymm5[4,12],zero,zero,zero,zero,zero,zero,ymm5[5,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm5[22,30],zero,zero,zero,zero,zero,zero,ymm5[23,31]
-; AVX2-SLOW-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[4,12],zero,zero,zero,zero,zero,zero,ymm0[5,13],zero,zero,zero,zero,ymm0[22,30],zero,zero,zero,zero,zero,zero,ymm0[23,31],zero,zero,zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm4[4,12],zero,zero,zero,zero,zero,zero,ymm4[5,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[22,30],zero,zero,zero,zero,zero,zero,ymm4[23,31],zero,zero,zero,zero
-; AVX2-SLOW-NEXT:    vpor %ymm0, %ymm3, %ymm0
-; AVX2-SLOW-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,12],zero,zero,zero,zero,zero,zero,ymm0[5,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[22,30],zero,zero,zero,zero,zero,zero,ymm0[23,31],zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,ymm5[4,12],zero,zero,zero,zero,zero,zero,ymm5[5,13],zero,zero,zero,zero,ymm5[22,30],zero,zero,zero,zero,zero,zero,ymm5[23,31],zero,zero,zero,zero,zero,zero
+; AVX2-SLOW-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX2-SLOW-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX2-SLOW-NEXT:    vmovdqa %ymm0, 32(%rax)
-; AVX2-SLOW-NEXT:    vmovdqa %ymm1, (%rax)
+; AVX2-SLOW-NEXT:    vmovdqa %ymm2, (%rax)
 ; AVX2-SLOW-NEXT:    vzeroupper
 ; AVX2-SLOW-NEXT:    retq
 ;
@@ -499,26 +499,26 @@ define void @store_i8_stride8_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX2-FAST-PERLANE-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX2-FAST-PERLANE-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
 ; AVX2-FAST-PERLANE-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm4
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm5
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,ymm2[0,8],zero,zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,ymm2[18,26],zero,zero,zero,zero,zero,zero,ymm2[19,27],zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,ymm5[0,8],zero,zero,zero,zero,zero,zero,ymm5[1,9],zero,zero,zero,zero,zero,zero,zero,zero,ymm5[18,26],zero,zero,zero,zero,zero,zero,ymm5[19,27]
-; AVX2-FAST-PERLANE-NEXT:    vpor %ymm3, %ymm6, %ymm3
-; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,ymm0[0,8],zero,zero,zero,zero,zero,zero,ymm0[1,9],zero,zero,zero,zero,ymm0[18,26],zero,zero,zero,zero,zero,zero,ymm0[19,27],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm6 = ymm4[0,8],zero,zero,zero,zero,zero,zero,ymm4[1,9],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[18,26],zero,zero,zero,zero,zero,zero,ymm4[19,27],zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vpor %ymm1, %ymm6, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm1
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,ymm1[0,8],zero,zero,zero,zero,zero,zero,ymm1[1,9],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[18,26],zero,zero,zero,zero,zero,zero,ymm1[19,27]
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,ymm3[0,8],zero,zero,zero,zero,zero,zero,ymm3[1,9],zero,zero,zero,zero,ymm3[18,26],zero,zero,zero,zero,zero,zero,ymm3[19,27],zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpor %ymm4, %ymm2, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm4 = ymm0[0,8],zero,zero,zero,zero,zero,zero,ymm0[1,9],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[18,26],zero,zero,zero,zero,zero,zero,ymm0[19,27],zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,ymm5[0,8],zero,zero,zero,zero,zero,zero,ymm5[1,9],zero,zero,zero,zero,ymm5[18,26],zero,zero,zero,zero,zero,zero,ymm5[19,27],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpor %ymm6, %ymm4, %ymm4
+; AVX2-FAST-PERLANE-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,ymm1[4,12],zero,zero,zero,zero,zero,zero,ymm1[5,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm1[22,30],zero,zero,zero,zero,zero,zero,ymm1[23,31]
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,ymm3[4,12],zero,zero,zero,zero,zero,zero,ymm3[5,13],zero,zero,zero,zero,ymm3[22,30],zero,zero,zero,zero,zero,zero,ymm3[23,31],zero,zero
 ; AVX2-FAST-PERLANE-NEXT:    vpor %ymm3, %ymm1, %ymm1
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,ymm2[4,12],zero,zero,zero,zero,zero,zero,ymm2[5,13],zero,zero,zero,zero,ymm2[22,30],zero,zero,zero,zero,zero,zero,ymm2[23,31],zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,ymm5[4,12],zero,zero,zero,zero,zero,zero,ymm5[5,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm5[22,30],zero,zero,zero,zero,zero,zero,ymm5[23,31]
-; AVX2-FAST-PERLANE-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[4,12],zero,zero,zero,zero,zero,zero,ymm0[5,13],zero,zero,zero,zero,ymm0[22,30],zero,zero,zero,zero,zero,zero,ymm0[23,31],zero,zero,zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm3 = ymm4[4,12],zero,zero,zero,zero,zero,zero,ymm4[5,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[22,30],zero,zero,zero,zero,zero,zero,ymm4[23,31],zero,zero,zero,zero
-; AVX2-FAST-PERLANE-NEXT:    vpor %ymm0, %ymm3, %ymm0
-; AVX2-FAST-PERLANE-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[4,12],zero,zero,zero,zero,zero,zero,ymm0[5,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[22,30],zero,zero,zero,zero,zero,zero,ymm0[23,31],zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,ymm5[4,12],zero,zero,zero,zero,zero,zero,ymm5[5,13],zero,zero,zero,zero,ymm5[22,30],zero,zero,zero,zero,zero,zero,ymm5[23,31],zero,zero,zero,zero,zero,zero
+; AVX2-FAST-PERLANE-NEXT:    vpor %ymm3, %ymm0, %ymm0
+; AVX2-FAST-PERLANE-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm0, 32(%rax)
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm1, (%rax)
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa %ymm2, (%rax)
 ; AVX2-FAST-PERLANE-NEXT:    vzeroupper
 ; AVX2-FAST-PERLANE-NEXT:    retq
 ;
@@ -539,25 +539,25 @@ define void @store_i8_stride8_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512F-SLOW-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX512F-SLOW-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
 ; AVX512F-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm4
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm5
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm2[u,u,u,u],zero,zero,ymm2[4,12,u,u,u,u],zero,zero,ymm2[5,13,u,u,u,u,22,30],zero,zero,ymm2[u,u,u,u,23,31],zero,zero
-; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = zero,zero,ymm0[4,12,u,u,u,u],zero,zero,ymm0[5,13,u,u,u,u,22,30],zero,zero,ymm0[u,u,u,u,23,31],zero,zero,ymm0[u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u],zero,zero,ymm2[0,8,u,u,u,u],zero,zero,ymm2[1,9,u,u,u,u,18,26],zero,zero,ymm2[u,u,u,u,19,27],zero,zero
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[0,8,u,u,u,u],zero,zero,ymm0[1,9,u,u,u,u,18,26],zero,zero,ymm0[u,u,u,u,19,27],zero,zero,ymm0[u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm5[u,u,u,u,4,12],zero,zero,ymm5[u,u,u,u,5,13],zero,zero,ymm5[u,u,u,u],zero,zero,ymm5[22,30,u,u,u,u],zero,zero,ymm5[23,31]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm4[4,12],zero,zero,ymm4[u,u,u,u,5,13],zero,zero,ymm4[u,u,u,u],zero,zero,ymm4[22,30,u,u,u,u],zero,zero,ymm4[23,31,u,u,u,u]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm5[u,u,u,u,0,8],zero,zero,ymm5[u,u,u,u,1,9],zero,zero,ymm5[u,u,u,u],zero,zero,ymm5[18,26,u,u,u,u],zero,zero,ymm5[19,27]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm4[0,8],zero,zero,ymm4[u,u,u,u,1,9],zero,zero,ymm4[u,u,u,u],zero,zero,ymm4[18,26,u,u,u,u],zero,zero,ymm4[19,27,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,4,12],zero,zero,ymm1[u,u,u,u,5,13],zero,zero,ymm1[u,u,u,u],zero,zero,ymm1[22,30,u,u,u,u],zero,zero,ymm1[23,31]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm0[4,12],zero,zero,ymm0[u,u,u,u,5,13],zero,zero,ymm0[u,u,u,u],zero,zero,ymm0[22,30,u,u,u,u],zero,zero,ymm0[23,31,u,u,u,u]
 ; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512F-SLOW-NEXT:    vpord %zmm0, %zmm1, %zmm0
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm1[u,u,u,u,0,8],zero,zero,ymm1[u,u,u,u,1,9],zero,zero,ymm1[u,u,u,u],zero,zero,ymm1[18,26,u,u,u,u],zero,zero,ymm1[19,27]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm0[0,8],zero,zero,ymm0[u,u,u,u,1,9],zero,zero,ymm0[u,u,u,u],zero,zero,ymm0[18,26,u,u,u,u],zero,zero,ymm0[19,27,u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm1[u,u,u,u],zero,zero,ymm1[4,12,u,u,u,u],zero,zero,ymm1[5,13,u,u,u,u,22,30],zero,zero,ymm1[u,u,u,u,23,31],zero,zero
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,ymm0[4,12,u,u,u,u],zero,zero,ymm0[5,13,u,u,u,u,22,30],zero,zero,ymm0[u,u,u,u,23,31],zero,zero,ymm0[u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u],zero,zero,ymm1[0,8,u,u,u,u],zero,zero,ymm1[1,9,u,u,u,u,18,26],zero,zero,ymm1[u,u,u,u,19,27],zero,zero
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,ymm0[0,8,u,u,u,u],zero,zero,ymm0[1,9,u,u,u,u,18,26],zero,zero,ymm0[u,u,u,u,19,27],zero,zero,ymm0[u,u,u,u]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpord %zmm0, %zmm2, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
@@ -619,29 +619,29 @@ define void @store_i8_stride8_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
 ; AVX512BW-SLOW-NEXT:    vmovq {{.*#+}} xmm3 = mem[0],zero
 ; AVX512BW-SLOW-NEXT:    vmovq {{.*#+}} xmm4 = mem[0],zero
 ; AVX512BW-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm4
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm5
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm2[u,u,u,u,u,u,4,12,u,u,u,u,u,u,5,13,u,u,u,u,22,30,u,u,u,u,u,u,23,31,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm5[u,u,u,u,4,12,u,u,u,u,u,u,5,13,u,u,u,u,u,u,u,u,22,30,u,u,u,u,u,u,23,31]
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm1
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,4,12,u,u,u,u,u,u,5,13,u,u,u,u,u,u,u,u,22,30,u,u,u,u,u,u,23,31]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,u,u,u,4,12,u,u,u,u,u,u,5,13,u,u,u,u,22,30,u,u,u,u,u,u,23,31,u,u]
 ; AVX512BW-SLOW-NEXT:    movw $17544, %cx # imm = 0x4488
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm3, %ymm6 {%k1}
-; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[u,u,4,12,u,u,u,u,u,u,5,13,u,u,u,u,22,30,u,u,u,u,u,u,23,31,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm4[4,12,u,u,u,u,u,u,5,13,u,u,u,u,u,u,u,u,22,30,u,u,u,u,u,u,23,31,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm4, %ymm2 {%k1}
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm0[4,12,u,u,u,u,u,u,5,13,u,u,u,u,u,u,u,u,22,30,u,u,u,u,u,u,23,31,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm5[u,u,4,12,u,u,u,u,u,u,5,13,u,u,u,u,22,30,u,u,u,u,u,u,23,31,u,u,u,u,u,u]
 ; AVX512BW-SLOW-NEXT:    movw $4386, %cx # imm = 0x1122
 ; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k2
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm1, %ymm3 {%k2}
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0],ymm6[1],ymm3[2],ymm6[3],ymm3[4],ymm6[5],ymm3[6],ymm6[7]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm5[u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27]
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm2, %ymm3 {%k1}
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm4[0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u]
-; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm0, %ymm2 {%k2}
-; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
-; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm6, %ymm4 {%k2}
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4],ymm2[5],ymm4[6],ymm2[7]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u]
+; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm3, %ymm1 {%k1}
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm5[u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u]
+; AVX512BW-SLOW-NEXT:    vmovdqu16 %ymm3, %ymm0 {%k2}
+; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
 ; AVX512BW-SLOW-NEXT:    retq
@@ -3147,81 +3147,193 @@ define void @store_i8_stride8_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;
-; AVX512BW-LABEL: store_i8_stride8_vf32:
-; AVX512BW:       # %bb.0:
-; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
-; AVX512BW-NEXT:    movq {{[0-9]+}}(%rsp), %r11
-; AVX512BW-NEXT:    vmovdqa (%r11), %xmm0
-; AVX512BW-NEXT:    vmovdqa 16(%r11), %xmm7
-; AVX512BW-NEXT:    vmovdqa (%r10), %xmm1
-; AVX512BW-NEXT:    vmovdqa 16(%r10), %xmm8
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512BW-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm5
-; AVX512BW-NEXT:    vmovdqa (%r9), %xmm2
-; AVX512BW-NEXT:    vmovdqa 16(%r9), %xmm9
-; AVX512BW-NEXT:    vmovdqa (%r8), %xmm3
-; AVX512BW-NEXT:    vmovdqa 16(%r8), %xmm10
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; AVX512BW-NEXT:    vinserti32x4 $2, %xmm4, %zmm4, %zmm11
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,0,32,4,5,1,33,2,1,2,34,4,5,3,35,16,17,20,52,20,21,21,53,16,17,22,54,22,21,23,55]
-; AVX512BW-NEXT:    vpermt2w %zmm5, %zmm4, %zmm11
-; AVX512BW-NEXT:    vmovdqa (%rcx), %xmm5
-; AVX512BW-NEXT:    vmovdqa 16(%rcx), %xmm12
-; AVX512BW-NEXT:    vmovdqa (%rdx), %xmm13
-; AVX512BW-NEXT:    vmovdqa 16(%rdx), %xmm14
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm13[8],xmm5[8],xmm13[9],xmm5[9],xmm13[10],xmm5[10],xmm13[11],xmm5[11],xmm13[12],xmm5[12],xmm13[13],xmm5[13],xmm13[14],xmm5[14],xmm13[15],xmm5[15]
-; AVX512BW-NEXT:    vinserti32x4 $2, %xmm6, %zmm6, %zmm15
-; AVX512BW-NEXT:    vmovdqa64 (%rsi), %xmm16
-; AVX512BW-NEXT:    vmovdqa64 16(%rsi), %xmm17
-; AVX512BW-NEXT:    vmovdqa64 (%rdi), %xmm18
-; AVX512BW-NEXT:    vmovdqa64 16(%rdi), %xmm19
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm18[8],xmm16[8],xmm18[9],xmm16[9],xmm18[10],xmm16[10],xmm18[11],xmm16[11],xmm18[12],xmm16[12],xmm18[13],xmm16[13],xmm18[14],xmm16[14],xmm18[15],xmm16[15]
-; AVX512BW-NEXT:    vinserti32x4 $2, %xmm6, %zmm6, %zmm6
-; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm20 = <0,32,u,u,1,33,u,u,2,34,u,u,3,35,u,u,20,52,u,u,21,53,u,u,22,54,u,u,23,55,u,u>
-; AVX512BW-NEXT:    vpermt2w %zmm15, %zmm20, %zmm6
-; AVX512BW-NEXT:    movw $-21846, %cx # imm = 0xAAAA
-; AVX512BW-NEXT:    kmovd %ecx, %k1
-; AVX512BW-NEXT:    vmovdqa32 %zmm11, %zmm6 {%k1}
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; AVX512BW-NEXT:    vinserti32x4 $2, %xmm11, %zmm11, %zmm11
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
-; AVX512BW-NEXT:    vinserti32x4 $2, %xmm15, %zmm15, %zmm15
-; AVX512BW-NEXT:    vpermt2w %zmm11, %zmm4, %zmm15
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm14[0],xmm12[0],xmm14[1],xmm12[1],xmm14[2],xmm12[2],xmm14[3],xmm12[3],xmm14[4],xmm12[4],xmm14[5],xmm12[5],xmm14[6],xmm12[6],xmm14[7],xmm12[7]
-; AVX512BW-NEXT:    vinserti32x4 $2, %xmm11, %zmm11, %zmm11
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm21 = xmm19[0],xmm17[0],xmm19[1],xmm17[1],xmm19[2],xmm17[2],xmm19[3],xmm17[3],xmm19[4],xmm17[4],xmm19[5],xmm17[5],xmm19[6],xmm17[6],xmm19[7],xmm17[7]
-; AVX512BW-NEXT:    vinserti32x4 $2, %xmm21, %zmm21, %zmm21
-; AVX512BW-NEXT:    vpermt2w %zmm11, %zmm20, %zmm21
-; AVX512BW-NEXT:    vmovdqa32 %zmm15, %zmm21 {%k1}
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
-; AVX512BW-NEXT:    vinserti32x4 $2, %xmm7, %zmm7, %zmm7
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
-; AVX512BW-NEXT:    vinserti32x4 $2, %xmm8, %zmm8, %zmm8
-; AVX512BW-NEXT:    vpermt2w %zmm7, %zmm4, %zmm8
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm14[8],xmm12[8],xmm14[9],xmm12[9],xmm14[10],xmm12[10],xmm14[11],xmm12[11],xmm14[12],xmm12[12],xmm14[13],xmm12[13],xmm14[14],xmm12[14],xmm14[15],xmm12[15]
-; AVX512BW-NEXT:    vinserti32x4 $2, %xmm7, %zmm7, %zmm7
-; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm19[8],xmm17[8],xmm19[9],xmm17[9],xmm19[10],xmm17[10],xmm19[11],xmm17[11],xmm19[12],xmm17[12],xmm19[13],xmm17[13],xmm19[14],xmm17[14],xmm19[15],xmm17[15]
-; AVX512BW-NEXT:    vinserti32x4 $2, %xmm9, %zmm9, %zmm9
-; AVX512BW-NEXT:    vpermt2w %zmm7, %zmm20, %zmm9
-; AVX512BW-NEXT:    vmovdqa32 %zmm8, %zmm9 {%k1}
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
-; AVX512BW-NEXT:    vinserti32x4 $2, %xmm1, %zmm1, %zmm1
-; AVX512BW-NEXT:    vpermt2w %zmm0, %zmm4, %zmm1
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3],xmm13[4],xmm5[4],xmm13[5],xmm5[5],xmm13[6],xmm5[6],xmm13[7],xmm5[7]
-; AVX512BW-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm18[0],xmm16[0],xmm18[1],xmm16[1],xmm18[2],xmm16[2],xmm18[3],xmm16[3],xmm18[4],xmm16[4],xmm18[5],xmm16[5],xmm18[6],xmm16[6],xmm18[7],xmm16[7]
-; AVX512BW-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm2
-; AVX512BW-NEXT:    vpermt2w %zmm0, %zmm20, %zmm2
-; AVX512BW-NEXT:    vmovdqa32 %zmm1, %zmm2 {%k1}
-; AVX512BW-NEXT:    vmovdqa64 %zmm2, (%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm9, 192(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm21, 128(%rax)
-; AVX512BW-NEXT:    vmovdqa64 %zmm6, 64(%rax)
-; AVX512BW-NEXT:    vzeroupper
-; AVX512BW-NEXT:    retq
+; AVX512BW-SLOW-LABEL: store_i8_stride8_vf32:
+; AVX512BW-SLOW:       # %bb.0:
+; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512BW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %r11
+; AVX512BW-SLOW-NEXT:    vmovdqa64 (%r11), %xmm25
+; AVX512BW-SLOW-NEXT:    vmovdqa 16(%r11), %xmm11
+; AVX512BW-SLOW-NEXT:    vmovdqa (%r10), %xmm1
+; AVX512BW-SLOW-NEXT:    vmovdqa 16(%r10), %xmm12
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm25[8],xmm1[9],xmm25[9],xmm1[10],xmm25[10],xmm1[11],xmm25[11],xmm1[12],xmm25[12],xmm1[13],xmm25[13],xmm1[14],xmm25[14],xmm1[15],xmm25[15]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa (%r9), %xmm2
+; AVX512BW-SLOW-NEXT:    vmovdqa 16(%r9), %xmm13
+; AVX512BW-SLOW-NEXT:    vmovdqa (%r8), %xmm3
+; AVX512BW-SLOW-NEXT:    vmovdqa 16(%r8), %xmm14
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm4, %zmm19
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [0,1,0,32,4,5,1,33,2,1,2,34,4,5,3,35,16,17,20,52,20,21,21,53,16,17,22,54,22,21,23,55]
+; AVX512BW-SLOW-NEXT:    vpermt2w %zmm5, %zmm23, %zmm19
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rsi), %xmm5
+; AVX512BW-SLOW-NEXT:    vmovdqa 16(%rsi), %xmm15
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rdi), %xmm6
+; AVX512BW-SLOW-NEXT:    vmovdqa64 16(%rdi), %xmm16
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
+; AVX512BW-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm8 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm9 = xmm7[1,1,1,1]
+; AVX512BW-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm8, %ymm10
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rcx), %xmm8
+; AVX512BW-SLOW-NEXT:    vmovdqa64 16(%rcx), %xmm17
+; AVX512BW-SLOW-NEXT:    vmovdqa (%rdx), %xmm9
+; AVX512BW-SLOW-NEXT:    vmovdqa64 16(%rdx), %xmm18
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm20 = xmm9[8],xmm8[8],xmm9[9],xmm8[9],xmm9[10],xmm8[10],xmm9[11],xmm8[11],xmm9[12],xmm8[12],xmm9[13],xmm8[13],xmm9[14],xmm8[14],xmm9[15],xmm8[15]
+; AVX512BW-SLOW-NEXT:    vpshuflw {{.*#+}} xmm21 = xmm20[0,0,2,1,4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vpmovzxdq {{.*#+}} xmm21 = xmm21[0],zero,xmm21[1],zero
+; AVX512BW-SLOW-NEXT:    vpshuflw {{.*#+}} xmm22 = xmm20[0,2,2,3,4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vpmovzxdq {{.*#+}} xmm22 = xmm22[0],zero,xmm22[1],zero
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $1, %xmm22, %ymm21, %ymm4
+; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm10[0],ymm4[1],ymm10[2,3,4],ymm4[5],ymm10[6,7,8],ymm4[9],ymm10[10,11,12],ymm4[13],ymm10[14,15]
+; AVX512BW-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm22 = <4,20,u,u,5,21,u,u,6,22,u,u,7,23,u,u>
+; AVX512BW-SLOW-NEXT:    vpermt2w %ymm20, %ymm22, %ymm7
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm7
+; AVX512BW-SLOW-NEXT:    movw $-21846, %cx # imm = 0xAAAA
+; AVX512BW-SLOW-NEXT:    kmovd %ecx, %k1
+; AVX512BW-SLOW-NEXT:    vmovdqa32 %zmm19, %zmm7 {%k1}
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm12[0],xmm11[0],xmm12[1],xmm11[1],xmm12[2],xmm11[2],xmm12[3],xmm11[3],xmm12[4],xmm11[4],xmm12[5],xmm11[5],xmm12[6],xmm11[6],xmm12[7],xmm11[7]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm4, %zmm4
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm19 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm19, %zmm19, %zmm20
+; AVX512BW-SLOW-NEXT:    vpermt2w %zmm4, %zmm23, %zmm20
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm16[0],xmm15[0],xmm16[1],xmm15[1],xmm16[2],xmm15[2],xmm16[3],xmm15[3],xmm16[4],xmm15[4],xmm16[5],xmm15[5],xmm16[6],xmm15[6],xmm16[7],xmm15[7]
+; AVX512BW-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm19 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm21 = xmm4[1,1,1,1]
+; AVX512BW-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm21 = xmm21[0],zero,zero,zero,xmm21[1],zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $1, %xmm21, %ymm19, %ymm10
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm19 = xmm18[0],xmm17[0],xmm18[1],xmm17[1],xmm18[2],xmm17[2],xmm18[3],xmm17[3],xmm18[4],xmm17[4],xmm18[5],xmm17[5],xmm18[6],xmm17[6],xmm18[7],xmm17[7]
+; AVX512BW-SLOW-NEXT:    vpshuflw {{.*#+}} xmm21 = xmm19[0,0,2,1,4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vpmovzxdq {{.*#+}} xmm21 = xmm21[0],zero,xmm21[1],zero
+; AVX512BW-SLOW-NEXT:    vpshuflw {{.*#+}} xmm24 = xmm19[0,2,2,3,4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vpmovzxdq {{.*#+}} xmm24 = xmm24[0],zero,xmm24[1],zero
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $1, %xmm24, %ymm21, %ymm0
+; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm10[0],ymm0[1],ymm10[2,3,4],ymm0[5],ymm10[6,7,8],ymm0[9],ymm10[10,11,12],ymm0[13],ymm10[14,15]
+; AVX512BW-SLOW-NEXT:    vpermt2w %ymm19, %ymm22, %ymm4
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm19
+; AVX512BW-SLOW-NEXT:    vmovdqa32 %zmm20, %zmm19 {%k1}
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm12[8],xmm11[8],xmm12[9],xmm11[9],xmm12[10],xmm11[10],xmm12[11],xmm11[11],xmm12[12],xmm11[12],xmm12[13],xmm11[13],xmm12[14],xmm11[14],xmm12[15],xmm11[15]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm14[8],xmm13[8],xmm14[9],xmm13[9],xmm14[10],xmm13[10],xmm14[11],xmm13[11],xmm14[12],xmm13[12],xmm14[13],xmm13[13],xmm14[14],xmm13[14],xmm14[15],xmm13[15]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm4, %zmm4
+; AVX512BW-SLOW-NEXT:    vpermt2w %zmm0, %zmm23, %zmm4
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm16[8],xmm15[8],xmm16[9],xmm15[9],xmm16[10],xmm15[10],xmm16[11],xmm15[11],xmm16[12],xmm15[12],xmm16[13],xmm15[13],xmm16[14],xmm15[14],xmm16[15],xmm15[15]
+; AVX512BW-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm0[1,1,1,1]
+; AVX512BW-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm11 = xmm11[0],zero,zero,zero,xmm11[1],zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm11, %ymm10, %ymm10
+; AVX512BW-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm18[8],xmm17[8],xmm18[9],xmm17[9],xmm18[10],xmm17[10],xmm18[11],xmm17[11],xmm18[12],xmm17[12],xmm18[13],xmm17[13],xmm18[14],xmm17[14],xmm18[15],xmm17[15]
+; AVX512BW-SLOW-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm11[0,0,2,1,4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vpmovzxdq {{.*#+}} xmm12 = xmm12[0],zero,xmm12[1],zero
+; AVX512BW-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm11[0,2,2,3,4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vpmovzxdq {{.*#+}} xmm13 = xmm13[0],zero,xmm13[1],zero
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm13, %ymm12, %ymm12
+; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0],ymm12[1],ymm10[2,3,4],ymm12[5],ymm10[6,7,8],ymm12[9],ymm10[10,11,12],ymm12[13],ymm10[14,15]
+; AVX512BW-SLOW-NEXT:    vpermt2w %ymm11, %ymm22, %ymm0
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm10, %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa32 %zmm4, %zmm0 {%k1}
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm1[0],xmm25[0],xmm1[1],xmm25[1],xmm1[2],xmm25[2],xmm1[3],xmm25[3],xmm1[4],xmm25[4],xmm1[5],xmm25[5],xmm1[6],xmm25[6],xmm1[7],xmm25[7]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm1, %zmm1
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; AVX512BW-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm2
+; AVX512BW-SLOW-NEXT:    vpermt2w %zmm1, %zmm23, %zmm2
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX512BW-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[1,1,1,1]
+; AVX512BW-SLOW-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm3, %ymm3
+; AVX512BW-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; AVX512BW-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm4[0,0,2,1,4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vpmovzxdq {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero
+; AVX512BW-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm4[0,2,2,3,4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vpmovzxdq {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero
+; AVX512BW-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm5, %ymm5
+; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3,4],ymm5[5],ymm3[6,7,8],ymm5[9],ymm3[10,11,12],ymm5[13],ymm3[14,15]
+; AVX512BW-SLOW-NEXT:    vpermt2w %ymm4, %ymm22, %ymm1
+; AVX512BW-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512BW-SLOW-NEXT:    vmovdqa32 %zmm2, %zmm1 {%k1}
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm1, (%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, 192(%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm19, 128(%rax)
+; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm7, 64(%rax)
+; AVX512BW-SLOW-NEXT:    vzeroupper
+; AVX512BW-SLOW-NEXT:    retq
+;
+; AVX512BW-FAST-LABEL: store_i8_stride8_vf32:
+; AVX512BW-FAST:       # %bb.0:
+; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
+; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r10
+; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %r11
+; AVX512BW-FAST-NEXT:    vmovdqa (%r11), %xmm0
+; AVX512BW-FAST-NEXT:    vmovdqa 16(%r11), %xmm7
+; AVX512BW-FAST-NEXT:    vmovdqa (%r10), %xmm1
+; AVX512BW-FAST-NEXT:    vmovdqa 16(%r10), %xmm8
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm5
+; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %xmm2
+; AVX512BW-FAST-NEXT:    vmovdqa 16(%r9), %xmm9
+; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %xmm3
+; AVX512BW-FAST-NEXT:    vmovdqa 16(%r8), %xmm10
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm4, %zmm11
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [0,1,0,32,4,5,1,33,2,1,2,34,4,5,3,35,16,17,20,52,20,21,21,53,16,17,22,54,22,21,23,55]
+; AVX512BW-FAST-NEXT:    vpermt2w %zmm5, %zmm4, %zmm11
+; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %xmm5
+; AVX512BW-FAST-NEXT:    vmovdqa 16(%rcx), %xmm12
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm13
+; AVX512BW-FAST-NEXT:    vmovdqa 16(%rdx), %xmm14
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm13[8],xmm5[8],xmm13[9],xmm5[9],xmm13[10],xmm5[10],xmm13[11],xmm5[11],xmm13[12],xmm5[12],xmm13[13],xmm5[13],xmm13[14],xmm5[14],xmm13[15],xmm5[15]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm6, %zmm15
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rsi), %xmm16
+; AVX512BW-FAST-NEXT:    vmovdqa64 16(%rsi), %xmm17
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %xmm18
+; AVX512BW-FAST-NEXT:    vmovdqa64 16(%rdi), %xmm19
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm18[8],xmm16[8],xmm18[9],xmm16[9],xmm18[10],xmm16[10],xmm18[11],xmm16[11],xmm18[12],xmm16[12],xmm18[13],xmm16[13],xmm18[14],xmm16[14],xmm18[15],xmm16[15]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm6, %zmm6
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = <0,32,u,u,1,33,u,u,2,34,u,u,3,35,u,u,20,52,u,u,21,53,u,u,22,54,u,u,23,55,u,u>
+; AVX512BW-FAST-NEXT:    vpermt2w %zmm15, %zmm20, %zmm6
+; AVX512BW-FAST-NEXT:    movw $-21846, %cx # imm = 0xAAAA
+; AVX512BW-FAST-NEXT:    kmovd %ecx, %k1
+; AVX512BW-FAST-NEXT:    vmovdqa32 %zmm11, %zmm6 {%k1}
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm11, %zmm11, %zmm11
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm15, %zmm15, %zmm15
+; AVX512BW-FAST-NEXT:    vpermt2w %zmm11, %zmm4, %zmm15
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm14[0],xmm12[0],xmm14[1],xmm12[1],xmm14[2],xmm12[2],xmm14[3],xmm12[3],xmm14[4],xmm12[4],xmm14[5],xmm12[5],xmm14[6],xmm12[6],xmm14[7],xmm12[7]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm11, %zmm11, %zmm11
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm21 = xmm19[0],xmm17[0],xmm19[1],xmm17[1],xmm19[2],xmm17[2],xmm19[3],xmm17[3],xmm19[4],xmm17[4],xmm19[5],xmm17[5],xmm19[6],xmm17[6],xmm19[7],xmm17[7]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm21, %zmm21, %zmm21
+; AVX512BW-FAST-NEXT:    vpermt2w %zmm11, %zmm20, %zmm21
+; AVX512BW-FAST-NEXT:    vmovdqa32 %zmm15, %zmm21 {%k1}
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm7, %zmm7
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm8, %zmm8, %zmm8
+; AVX512BW-FAST-NEXT:    vpermt2w %zmm7, %zmm4, %zmm8
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm14[8],xmm12[8],xmm14[9],xmm12[9],xmm14[10],xmm12[10],xmm14[11],xmm12[11],xmm14[12],xmm12[12],xmm14[13],xmm12[13],xmm14[14],xmm12[14],xmm14[15],xmm12[15]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm7, %zmm7
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm19[8],xmm17[8],xmm19[9],xmm17[9],xmm19[10],xmm17[10],xmm19[11],xmm17[11],xmm19[12],xmm17[12],xmm19[13],xmm17[13],xmm19[14],xmm17[14],xmm19[15],xmm17[15]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm9, %zmm9, %zmm9
+; AVX512BW-FAST-NEXT:    vpermt2w %zmm7, %zmm20, %zmm9
+; AVX512BW-FAST-NEXT:    vmovdqa32 %zmm8, %zmm9 {%k1}
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm1, %zmm1
+; AVX512BW-FAST-NEXT:    vpermt2w %zmm0, %zmm4, %zmm1
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm13[0],xmm5[0],xmm13[1],xmm5[1],xmm13[2],xmm5[2],xmm13[3],xmm5[3],xmm13[4],xmm5[4],xmm13[5],xmm5[5],xmm13[6],xmm5[6],xmm13[7],xmm5[7]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm18[0],xmm16[0],xmm18[1],xmm16[1],xmm18[2],xmm16[2],xmm18[3],xmm16[3],xmm18[4],xmm16[4],xmm18[5],xmm16[5],xmm18[6],xmm16[6],xmm18[7],xmm16[7]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm2, %zmm2
+; AVX512BW-FAST-NEXT:    vpermt2w %zmm0, %zmm20, %zmm2
+; AVX512BW-FAST-NEXT:    vmovdqa32 %zmm1, %zmm2 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm9, 192(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm21, 128(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm6, 64(%rax)
+; AVX512BW-FAST-NEXT:    vzeroupper
+; AVX512BW-FAST-NEXT:    retq
   %in.vec0 = load <32 x i8>, ptr %in.vecptr0, align 64
   %in.vec1 = load <32 x i8>, ptr %in.vecptr1, align 64
   %in.vec2 = load <32 x i8>, ptr %in.vecptr2, align 64
@@ -6913,6 +7025,7 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 }
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; AVX1: {{.*}}
+; AVX512BW: {{.*}}
 ; AVX512BW-ONLY-FAST: {{.*}}
 ; AVX512BW-ONLY-SLOW: {{.*}}
 ; AVX512DQBW-FAST: {{.*}}

diff  --git a/llvm/test/CodeGen/X86/vector-pack-512.ll b/llvm/test/CodeGen/X86/vector-pack-512.ll
index 90af3c382ce86..31ef3c6d8fb8b 100644
--- a/llvm/test/CodeGen/X86/vector-pack-512.ll
+++ b/llvm/test/CodeGen/X86/vector-pack-512.ll
@@ -186,6 +186,7 @@ define <64 x i8> @concat_trunc_packsswb_512(<32 x i16> %a0, <32 x i16> %a1) noun
 ; AVX512F-NEXT:    vpsraw $15, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
 ; AVX512F-NEXT:    vpmovdb %zmm2, %xmm2
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
@@ -195,9 +196,9 @@ define <64 x i8> @concat_trunc_packsswb_512(<32 x i16> %a0, <32 x i16> %a1) noun
 ; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
 ; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm3, %ymm1
 ; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm2
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm1, %zmm1
-; AVX512F-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm2
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,8,1,9,4,14,5,15]
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,8,1,9,6,14,7,15]
 ; AVX512F-NEXT:    vpermi2q %zmm1, %zmm2, %zmm0
 ; AVX512F-NEXT:    retq
 ;
@@ -228,6 +229,7 @@ define <64 x i8> @concat_trunc_packuswb_512(<32 x i16> %a0, <32 x i16> %a1) noun
 ; AVX512F-NEXT:    vpsrlw $15, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
 ; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
 ; AVX512F-NEXT:    vpmovdb %zmm2, %xmm2
 ; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm3 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
@@ -237,9 +239,9 @@ define <64 x i8> @concat_trunc_packuswb_512(<32 x i16> %a0, <32 x i16> %a1) noun
 ; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
 ; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm3, %ymm1
 ; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm2
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm1, %zmm1
-; AVX512F-NEXT:    vinserti32x4 $2, %xmm0, %zmm2, %zmm2
-; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,8,1,9,4,14,5,15]
+; AVX512F-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [0,8,1,9,6,14,7,15]
 ; AVX512F-NEXT:    vpermi2q %zmm1, %zmm2, %zmm0
 ; AVX512F-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
index a13760e0954d3..2afdee01e453d 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -754,34 +754,12 @@ define <4 x double> @shuffle_v4f64_0044(<4 x double> %a, <4 x double> %b) {
 }
 
 define <4 x double> @shuffle_v4f64_0044_v2f64(<2 x double> %a, <2 x double> %b) {
-; AVX1OR2-LABEL: shuffle_v4f64_0044_v2f64:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; AVX1OR2-NEXT:    retq
-;
-; AVX512VL-SLOW-LABEL: shuffle_v4f64_0044_v2f64:
-; AVX512VL-SLOW:       # %bb.0:
-; AVX512VL-SLOW-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-SLOW-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; AVX512VL-SLOW-NEXT:    retq
-;
-; AVX512VL-FAST-ALL-LABEL: shuffle_v4f64_0044_v2f64:
-; AVX512VL-FAST-ALL:       # %bb.0:
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-ALL-NEXT:    vmovapd {{.*#+}} ymm2 = [0,0,4,4]
-; AVX512VL-FAST-ALL-NEXT:    vpermt2pd %ymm1, %ymm2, %ymm0
-; AVX512VL-FAST-ALL-NEXT:    retq
-;
-; AVX512VL-FAST-PERLANE-LABEL: shuffle_v4f64_0044_v2f64:
-; AVX512VL-FAST-PERLANE:       # %bb.0:
-; AVX512VL-FAST-PERLANE-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
-; AVX512VL-FAST-PERLANE-NEXT:    retq
+; ALL-LABEL: shuffle_v4f64_0044_v2f64:
+; ALL:       # %bb.0:
+; ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; ALL-NEXT:    retq
   %1 = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> <i32 0, i32 0>
   %2 = shufflevector <2 x double> %b, <2 x double> undef, <2 x i32> <i32 0, i32 0>
   %3 = shufflevector <2 x double> %1, <2 x double> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -789,34 +767,12 @@ define <4 x double> @shuffle_v4f64_0044_v2f64(<2 x double> %a, <2 x double> %b)
 }
 
 define <4 x double> @shuffle_v4f64_1032_v2f64(<2 x double> %a, <2 x double> %b) {
-; AVX1OR2-LABEL: shuffle_v4f64_1032_v2f64:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
-; AVX1OR2-NEXT:    retq
-;
-; AVX512VL-SLOW-LABEL: shuffle_v4f64_1032_v2f64:
-; AVX512VL-SLOW:       # %bb.0:
-; AVX512VL-SLOW-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-SLOW-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
-; AVX512VL-SLOW-NEXT:    retq
-;
-; AVX512VL-FAST-ALL-LABEL: shuffle_v4f64_1032_v2f64:
-; AVX512VL-FAST-ALL:       # %bb.0:
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-ALL-NEXT:    vmovapd {{.*#+}} ymm2 = [1,0,5,4]
-; AVX512VL-FAST-ALL-NEXT:    vpermt2pd %ymm1, %ymm2, %ymm0
-; AVX512VL-FAST-ALL-NEXT:    retq
-;
-; AVX512VL-FAST-PERLANE-LABEL: shuffle_v4f64_1032_v2f64:
-; AVX512VL-FAST-PERLANE:       # %bb.0:
-; AVX512VL-FAST-PERLANE-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
-; AVX512VL-FAST-PERLANE-NEXT:    retq
+; ALL-LABEL: shuffle_v4f64_1032_v2f64:
+; ALL:       # %bb.0:
+; ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; ALL-NEXT:    retq
   %1 = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> <i32 1, i32 0>
   %2 = shufflevector <2 x double> %b, <2 x double> undef, <2 x i32> <i32 1, i32 0>
   %3 = shufflevector <2 x double> %1, <2 x double> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1645,34 +1601,12 @@ define <4 x i64> @shuffle_v4i64_0044_v2i64(<2 x i64> %a, <2 x i64> %b) {
 }
 
 define <4 x i64> @shuffle_v4i64_1032_v2i64(<2 x i64> %a, <2 x i64> %b) {
-; AVX1OR2-LABEL: shuffle_v4i64_1032_v2i64:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
-; AVX1OR2-NEXT:    retq
-;
-; AVX512VL-SLOW-LABEL: shuffle_v4i64_1032_v2i64:
-; AVX512VL-SLOW:       # %bb.0:
-; AVX512VL-SLOW-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
-; AVX512VL-SLOW-NEXT:    retq
-;
-; AVX512VL-FAST-ALL-LABEL: shuffle_v4i64_1032_v2i64:
-; AVX512VL-FAST-ALL:       # %bb.0:
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm2 = [1,0,5,4]
-; AVX512VL-FAST-ALL-NEXT:    vpermt2q %ymm1, %ymm2, %ymm0
-; AVX512VL-FAST-ALL-NEXT:    retq
-;
-; AVX512VL-FAST-PERLANE-LABEL: shuffle_v4i64_1032_v2i64:
-; AVX512VL-FAST-PERLANE:       # %bb.0:
-; AVX512VL-FAST-PERLANE-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
-; AVX512VL-FAST-PERLANE-NEXT:    retq
+; ALL-LABEL: shuffle_v4i64_1032_v2i64:
+; ALL:       # %bb.0:
+; ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; ALL-NEXT:    retq
   %1 = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
   %2 = shufflevector <2 x i64> %b, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
   %3 = shufflevector <2 x i64> %1, <2 x i64> %2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>

diff  --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
index 6c386b5773e63..1546a58bc8401 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
@@ -626,34 +626,12 @@ define <8 x float> @shuffle_v8f32_00224466(<8 x float> %a, <8 x float> %b) {
 }
 
 define <8 x float> @shuffle_v8f32_00224466_v4f32(<4 x float> %a, <4 x float> %b) {
-; AVX1OR2-LABEL: shuffle_v8f32_00224466_v4f32:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
-; AVX1OR2-NEXT:    retq
-;
-; AVX512VL-SLOW-LABEL: shuffle_v8f32_00224466_v4f32:
-; AVX512VL-SLOW:       # %bb.0:
-; AVX512VL-SLOW-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-SLOW-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
-; AVX512VL-SLOW-NEXT:    retq
-;
-; AVX512VL-FAST-ALL-LABEL: shuffle_v8f32_00224466_v4f32:
-; AVX512VL-FAST-ALL:       # %bb.0:
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-ALL-NEXT:    vmovaps {{.*#+}} ymm2 = [0,0,2,2,8,8,10,10]
-; AVX512VL-FAST-ALL-NEXT:    vpermt2ps %ymm1, %ymm2, %ymm0
-; AVX512VL-FAST-ALL-NEXT:    retq
-;
-; AVX512VL-FAST-PERLANE-LABEL: shuffle_v8f32_00224466_v4f32:
-; AVX512VL-FAST-PERLANE:       # %bb.0:
-; AVX512VL-FAST-PERLANE-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
-; AVX512VL-FAST-PERLANE-NEXT:    retq
+; ALL-LABEL: shuffle_v8f32_00224466_v4f32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
+; ALL-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %a, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
   %2 = shufflevector <4 x float> %b, <4 x float> %b, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
   %3 = shufflevector <4 x float> %1, <4 x float> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -661,34 +639,12 @@ define <8 x float> @shuffle_v8f32_00224466_v4f32(<4 x float> %a, <4 x float> %b)
 }
 
 define <8 x float> @shuffle_v8f32_00004444_v4f32(<4 x float> %a, <4 x float> %b) {
-; AVX1OR2-LABEL: shuffle_v8f32_00004444_v4f32:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX1OR2-NEXT:    retq
-;
-; AVX512VL-SLOW-LABEL: shuffle_v8f32_00004444_v4f32:
-; AVX512VL-SLOW:       # %bb.0:
-; AVX512VL-SLOW-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX512VL-SLOW-NEXT:    retq
-;
-; AVX512VL-FAST-ALL-LABEL: shuffle_v8f32_00004444_v4f32:
-; AVX512VL-FAST-ALL:       # %bb.0:
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-ALL-NEXT:    vmovaps {{.*#+}} ymm2 = [0,0,0,0,8,8,8,8]
-; AVX512VL-FAST-ALL-NEXT:    vpermt2ps %ymm1, %ymm2, %ymm0
-; AVX512VL-FAST-ALL-NEXT:    retq
-;
-; AVX512VL-FAST-PERLANE-LABEL: shuffle_v8f32_00004444_v4f32:
-; AVX512VL-FAST-PERLANE:       # %bb.0:
-; AVX512VL-FAST-PERLANE-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX512VL-FAST-PERLANE-NEXT:    retq
+; ALL-LABEL: shuffle_v8f32_00004444_v4f32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
+; ALL-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
   ret <8 x float> %1
 }
@@ -712,34 +668,12 @@ define <8 x float> @shuffle_v8f32_11335577(<8 x float> %a, <8 x float> %b) {
 }
 
 define <8 x float> @shuffle_v8f32_11335577_v4f32(<4 x float> %a, <4 x float> %b) {
-; AVX1OR2-LABEL: shuffle_v8f32_11335577_v4f32:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
-; AVX1OR2-NEXT:    retq
-;
-; AVX512VL-SLOW-LABEL: shuffle_v8f32_11335577_v4f32:
-; AVX512VL-SLOW:       # %bb.0:
-; AVX512VL-SLOW-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-SLOW-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
-; AVX512VL-SLOW-NEXT:    retq
-;
-; AVX512VL-FAST-ALL-LABEL: shuffle_v8f32_11335577_v4f32:
-; AVX512VL-FAST-ALL:       # %bb.0:
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-ALL-NEXT:    vmovaps {{.*#+}} ymm2 = [1,1,3,3,9,9,11,11]
-; AVX512VL-FAST-ALL-NEXT:    vpermt2ps %ymm1, %ymm2, %ymm0
-; AVX512VL-FAST-ALL-NEXT:    retq
-;
-; AVX512VL-FAST-PERLANE-LABEL: shuffle_v8f32_11335577_v4f32:
-; AVX512VL-FAST-PERLANE:       # %bb.0:
-; AVX512VL-FAST-PERLANE-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
-; AVX512VL-FAST-PERLANE-NEXT:    retq
+; ALL-LABEL: shuffle_v8f32_11335577_v4f32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; ALL-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> %a, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
   %2 = shufflevector <4 x float> %b, <4 x float> %b, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
   %3 = shufflevector <4 x float> %1, <4 x float> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1631,34 +1565,12 @@ define <8 x float> @shuffle_v8f32_5555uuuu(<8 x float> %a, <8 x float> %b) {
 }
 
 define <8 x float> @shuffle_v8f32_32107654_v4f32(<4 x float> %a, <4 x float> %b) {
-; AVX1OR2-LABEL: shuffle_v8f32_32107654_v4f32:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; AVX1OR2-NEXT:    retq
-;
-; AVX512VL-SLOW-LABEL: shuffle_v8f32_32107654_v4f32:
-; AVX512VL-SLOW:       # %bb.0:
-; AVX512VL-SLOW-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; AVX512VL-SLOW-NEXT:    retq
-;
-; AVX512VL-FAST-ALL-LABEL: shuffle_v8f32_32107654_v4f32:
-; AVX512VL-FAST-ALL:       # %bb.0:
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-ALL-NEXT:    vmovaps {{.*#+}} ymm2 = [3,2,1,0,11,10,9,8]
-; AVX512VL-FAST-ALL-NEXT:    vpermt2ps %ymm1, %ymm2, %ymm0
-; AVX512VL-FAST-ALL-NEXT:    retq
-;
-; AVX512VL-FAST-PERLANE-LABEL: shuffle_v8f32_32107654_v4f32:
-; AVX512VL-FAST-PERLANE:       # %bb.0:
-; AVX512VL-FAST-PERLANE-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; AVX512VL-FAST-PERLANE-NEXT:    retq
+; ALL-LABEL: shuffle_v8f32_32107654_v4f32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; ALL-NEXT:    retq
   %1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   %2 = shufflevector <4 x float> %b, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   %3 = shufflevector <4 x float> %1, <4 x float> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -3357,34 +3269,12 @@ define <8 x i32> @shuffle_v8i32_uuuuuu7u(<8 x i32> %a, <8 x i32> %b) nounwind {
 }
 
 define <8 x i32> @shuffle_v8i32_32107654_v4i32(<4 x i32> %a, <4 x i32> %b) {
-; AVX1OR2-LABEL: shuffle_v8i32_32107654_v4i32:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; AVX1OR2-NEXT:    retq
-;
-; AVX512VL-SLOW-LABEL: shuffle_v8i32_32107654_v4i32:
-; AVX512VL-SLOW:       # %bb.0:
-; AVX512VL-SLOW-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; AVX512VL-SLOW-NEXT:    retq
-;
-; AVX512VL-FAST-ALL-LABEL: shuffle_v8i32_32107654_v4i32:
-; AVX512VL-FAST-ALL:       # %bb.0:
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm2 = [3,2,1,0,11,10,9,8]
-; AVX512VL-FAST-ALL-NEXT:    vpermt2d %ymm1, %ymm2, %ymm0
-; AVX512VL-FAST-ALL-NEXT:    retq
-;
-; AVX512VL-FAST-PERLANE-LABEL: shuffle_v8i32_32107654_v4i32:
-; AVX512VL-FAST-PERLANE:       # %bb.0:
-; AVX512VL-FAST-PERLANE-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; AVX512VL-FAST-PERLANE-NEXT:    retq
+; ALL-LABEL: shuffle_v8i32_32107654_v4i32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; ALL-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   %2 = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -3392,34 +3282,12 @@ define <8 x i32> @shuffle_v8i32_32107654_v4i32(<4 x i32> %a, <4 x i32> %b) {
 }
 
 define <8 x i32> @shuffle_v8i32_00004444_v4f32(<4 x i32> %a, <4 x i32> %b) {
-; AVX1OR2-LABEL: shuffle_v8i32_00004444_v4f32:
-; AVX1OR2:       # %bb.0:
-; AVX1OR2-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1OR2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX1OR2-NEXT:    retq
-;
-; AVX512VL-SLOW-LABEL: shuffle_v8i32_00004444_v4f32:
-; AVX512VL-SLOW:       # %bb.0:
-; AVX512VL-SLOW-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX512VL-SLOW-NEXT:    retq
-;
-; AVX512VL-FAST-ALL-LABEL: shuffle_v8i32_00004444_v4f32:
-; AVX512VL-FAST-ALL:       # %bb.0:
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; AVX512VL-FAST-ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,0,0,0,8,8,8,8]
-; AVX512VL-FAST-ALL-NEXT:    vpermt2d %ymm1, %ymm2, %ymm0
-; AVX512VL-FAST-ALL-NEXT:    retq
-;
-; AVX512VL-FAST-PERLANE-LABEL: shuffle_v8i32_00004444_v4f32:
-; AVX512VL-FAST-PERLANE:       # %bb.0:
-; AVX512VL-FAST-PERLANE-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-FAST-PERLANE-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
-; AVX512VL-FAST-PERLANE-NEXT:    retq
+; ALL-LABEL: shuffle_v8i32_00004444_v4f32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
+; ALL-NEXT:    retq
   %1 = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
   ret <8 x i32> %1
 }
@@ -3809,25 +3677,38 @@ entry:
 ; This test used to crash due to bad handling of concat_vectors after a bitcast
 ; in lowerVectorShuffleAsBroadcast.
 define <8 x float> @broadcast_concat_crash(<4 x float> %x, <4 x float> %y, float %z) {
-; AVX1OR2-LABEL: broadcast_concat_crash:
-; AVX1OR2:       # %bb.0: # %entry
-; AVX1OR2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm1[3,3,3,3]
-; AVX1OR2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
-; AVX1OR2-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1OR2-NEXT:    retq
+; AVX1-LABEL: broadcast_concat_crash:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm1[3,3,3,3]
+; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: broadcast_concat_crash:
+; AVX2:       # %bb.0: # %entry
+; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
+; AVX2-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX2-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
+; AVX2-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX2-NEXT:    retq
 ;
 ; AVX512VL-SLOW-LABEL: broadcast_concat_crash:
 ; AVX512VL-SLOW:       # %bb.0: # %entry
-; AVX512VL-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm1[3,3,3,3]
+; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
+; AVX512VL-SLOW-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; AVX512VL-SLOW-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
 ; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512VL-SLOW-NEXT:    retq
 ;
 ; AVX512VL-FAST-LABEL: broadcast_concat_crash:
 ; AVX512VL-FAST:       # %bb.0: # %entry
-; AVX512VL-FAST-NEXT:    vmovaps {{.*#+}} xmm0 = [3,4,3,3]
-; AVX512VL-FAST-NEXT:    vpermi2ps %xmm2, %xmm1, %xmm0
-; AVX512VL-FAST-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX512VL-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-FAST-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,3]
+; AVX512VL-FAST-NEXT:    vmovaps {{.*#+}} xmm1 = [1,4,3,3]
+; AVX512VL-FAST-NEXT:    vpermi2ps %xmm2, %xmm0, %xmm1
+; AVX512VL-FAST-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512VL-FAST-NEXT:    retq
 entry:
   %tmp = shufflevector <4 x float> %x, <4 x float> %y, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>

diff  --git a/llvm/test/CodeGen/X86/vector-shuffle-512-v64.ll b/llvm/test/CodeGen/X86/vector-shuffle-512-v64.ll
index 722f60e998338..65be0085cac25 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-512-v64.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-512-v64.ll
@@ -167,7 +167,8 @@ define <64 x i8> @shuffle_v64i8_63_62_61_60_59_58_57_56_55_54_53_52_51_50_49_48_
 ; AVX512F-NEXT:    vpshufb %ymm1, %ymm0, %ymm2
 ; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
 ; AVX512F-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],zmm2[2,3,0,1]
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,0,1,6,7,4,5]
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_63_62_61_60_59_58_57_56_55_54_53_52_51_50_49_48_47_46_45_44_43_42_41_40_39_38_37_36_35_34_33_32_31_30_29_28_27_26_25_24_23_22_21_20_19_18_17_16_15_14_13_12_11_10_09_08_07_06_05_04_03_02_01_00:
@@ -182,7 +183,8 @@ define <64 x i8> @shuffle_v64i8_63_62_61_60_59_58_57_56_55_54_53_52_51_50_49_48_
 ; AVX512DQ-NEXT:    vpshufb %ymm1, %ymm0, %ymm2
 ; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
 ; AVX512DQ-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],zmm2[2,3,0,1]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512DQ-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,0,1,6,7,4,5]
 ; AVX512DQ-NEXT:    retq
 ;
 ; AVX512VBMI-LABEL: shuffle_v64i8_63_62_61_60_59_58_57_56_55_54_53_52_51_50_49_48_47_46_45_44_43_42_41_40_39_38_37_36_35_34_33_32_31_30_29_28_27_26_25_24_23_22_21_20_19_18_17_16_15_14_13_12_11_10_09_08_07_06_05_04_03_02_01_00:
@@ -208,10 +210,11 @@ define <64 x i8> @shuffle_v64i8_02_03_04_05_06_07_00_01_10_11_12_13_14_15_08_09_
 define <64 x i8> @shuffle_v64i8_01_03_02_05_07_06_09_11_10_13_15_14_17_19_18_21_23_22_25_27_26_29_31_30_33_35_34_37_39_38_41_43_42_45_47_46_49_51_50_53_55_54_57_59_58_61_63_62_01_03_02_05_01_03_02_05_01_03_02_05_01_03_02_05(<64 x i8> %a) {
 ; AVX512F-LABEL: shuffle_v64i8_01_03_02_05_07_06_09_11_10_13_15_14_17_19_18_21_23_22_25_27_26_29_31_30_33_35_34_37_39_38_41_43_42_45_47_46_49_51_50_53_55_54_57_59_58_61_63_62_01_03_02_05_01_03_02_05_01_03_02_05_01_03_02_05:
 ; AVX512F:       # %bb.0:
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,2,4,5,6,18,19]
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,2,4,5,6,22,23]
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm2 = ymm0[1,3,2,5,7,6,9,11,10,13,15,14,u,u,u,u,17,19,18,21,23,22,25,27,26,29,31,30,u,u,u,u]
 ; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
 ; AVX512F-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,u,u,1,3,2,5,7,6,9,11]
+; AVX512F-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512F-NEXT:    vpermt2d %zmm4, %zmm1, %zmm2
 ; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3,4,5,6,7]
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[10,13,15,14,1,3,2,5,u,u,u,u,u,u,u,u,26,29,31,30,17,19,18,21,23,22,25,27,u,u,u,u]
@@ -229,10 +232,11 @@ define <64 x i8> @shuffle_v64i8_01_03_02_05_07_06_09_11_10_13_15_14_17_19_18_21_
 ;
 ; AVX512DQ-LABEL: shuffle_v64i8_01_03_02_05_07_06_09_11_10_13_15_14_17_19_18_21_23_22_25_27_26_29_31_30_33_35_34_37_39_38_41_43_42_45_47_46_49_51_50_53_55_54_57_59_58_61_63_62_01_03_02_05_01_03_02_05_01_03_02_05_01_03_02_05:
 ; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,2,4,5,6,18,19]
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,2,4,5,6,22,23]
 ; AVX512DQ-NEXT:    vpshufb {{.*#+}} ymm2 = ymm0[1,3,2,5,7,6,9,11,10,13,15,14,u,u,u,u,17,19,18,21,23,22,25,27,26,29,31,30,u,u,u,u]
 ; AVX512DQ-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
 ; AVX512DQ-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,u,u,1,3,2,5,7,6,9,11]
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512DQ-NEXT:    vpermt2d %zmm4, %zmm1, %zmm2
 ; AVX512DQ-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3,4,5,6,7]
 ; AVX512DQ-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[10,13,15,14,1,3,2,5,u,u,u,u,u,u,u,u,26,29,31,30,17,19,18,21,23,22,25,27,u,u,u,u]

diff  --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
index b9014d796e8ea..91f2a6715ff7b 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -2498,7 +2498,8 @@ define <8 x i32> @combine_unneeded_subvector2(<8 x i32> %a, <8 x i32> %b) {
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[2,3]
 ; AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
 ; AVX1-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/vector-shuffle-v192.ll b/llvm/test/CodeGen/X86/vector-shuffle-v192.ll
index 47450ef5476f8..f7132b1ea7d23 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-v192.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-v192.ll
@@ -219,12 +219,14 @@ define <64 x i8> @f2(ptr %p0) {
 ; AVX512F-NEXT:    vmovdqa 112(%rdi), %xmm2
 ; AVX512F-NEXT:    vpshufb %xmm6, %xmm2, %xmm2
 ; AVX512F-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
 ; AVX512F-NEXT:    vmovdqa 80(%rdi), %xmm2
 ; AVX512F-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
 ; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm2
 ; AVX512F-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
 ; AVX512F-NEXT:    vpor %xmm1, %xmm2, %xmm1
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[0,1,0,1]
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm0
 ; AVX512F-NEXT:    retq
 ;
@@ -258,12 +260,14 @@ define <64 x i8> @f2(ptr %p0) {
 ; AVX512BW-NEXT:    vmovdqa 112(%rdi), %xmm4
 ; AVX512BW-NEXT:    vpshufb %xmm6, %xmm4, %xmm4
 ; AVX512BW-NEXT:    vpor %xmm2, %xmm4, %xmm2
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm2
 ; AVX512BW-NEXT:    vmovdqa 80(%rdi), %xmm4
 ; AVX512BW-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
 ; AVX512BW-NEXT:    vmovdqa 64(%rdi), %xmm4
 ; AVX512BW-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
 ; AVX512BW-NEXT:    vpor %xmm1, %xmm3, %xmm1
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,0,1],zmm2[0,1,0,1]
+; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm2[4,5,6,7]
 ; AVX512BW-NEXT:    movabsq $8796090925056, %rax # imm = 0x7FFFFE00000
 ; AVX512BW-NEXT:    kmovq %rax, %k1
 ; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}
@@ -498,12 +502,14 @@ define <64 x i8> @f4(ptr %p0) {
 ; AVX512F-NEXT:    vmovdqa 112(%rdi), %xmm2
 ; AVX512F-NEXT:    vpshufb %xmm6, %xmm2, %xmm2
 ; AVX512F-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512F-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
 ; AVX512F-NEXT:    vmovdqa 80(%rdi), %xmm2
 ; AVX512F-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
 ; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm2
 ; AVX512F-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
 ; AVX512F-NEXT:    vpor %xmm1, %xmm2, %xmm1
-; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[0,1,0,1]
+; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
 ; AVX512F-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm0
 ; AVX512F-NEXT:    retq
 ;
@@ -537,12 +543,14 @@ define <64 x i8> @f4(ptr %p0) {
 ; AVX512BW-NEXT:    vmovdqa 112(%rdi), %xmm4
 ; AVX512BW-NEXT:    vpshufb %xmm6, %xmm4, %xmm4
 ; AVX512BW-NEXT:    vpor %xmm2, %xmm4, %xmm2
+; AVX512BW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm2
 ; AVX512BW-NEXT:    vmovdqa 80(%rdi), %xmm4
 ; AVX512BW-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
 ; AVX512BW-NEXT:    vmovdqa 64(%rdi), %xmm4
 ; AVX512BW-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
 ; AVX512BW-NEXT:    vpor %xmm1, %xmm3, %xmm1
-; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,0,1],zmm2[0,1,0,1]
+; AVX512BW-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm2[4,5,6,7]
 ; AVX512BW-NEXT:    movabsq $8796090925056, %rax # imm = 0x7FFFFE00000
 ; AVX512BW-NEXT:    kmovq %rax, %k1
 ; AVX512BW-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k1}

diff  --git a/llvm/test/CodeGen/X86/vselect-avx.ll b/llvm/test/CodeGen/X86/vselect-avx.ll
index 76daa12e769ca..516fed3907c1c 100644
--- a/llvm/test/CodeGen/X86/vselect-avx.ll
+++ b/llvm/test/CodeGen/X86/vselect-avx.ll
@@ -337,16 +337,16 @@ define void @vselect_concat_splat() {
 ; AVX512-NEXT:    vmovups (%rax), %xmm1
 ; AVX512-NEXT:    vmovaps {{.*#+}} xmm2 = [0,3,6,9]
 ; AVX512-NEXT:    vpermi2ps %ymm1, %ymm0, %ymm2
-; AVX512-NEXT:    vmovups 0, %ymm3
-; AVX512-NEXT:    vmovups 32, %xmm4
+; AVX512-NEXT:    vmovups 32, %xmm3
+; AVX512-NEXT:    vmovups 0, %ymm4
 ; AVX512-NEXT:    vxorps %xmm5, %xmm5, %xmm5
 ; AVX512-NEXT:    vcmpneqps %xmm5, %xmm2, %k0
 ; AVX512-NEXT:    kshiftlw $4, %k0, %k1
 ; AVX512-NEXT:    korw %k1, %k0, %k1
 ; AVX512-NEXT:    vmovaps {{.*#+}} ymm2 = [0,3,6,9,1,4,7,10]
-; AVX512-NEXT:    vpermt2ps %ymm4, %ymm2, %ymm3
+; AVX512-NEXT:    vpermt2ps %ymm3, %ymm2, %ymm4
 ; AVX512-NEXT:    vpermt2ps %ymm1, %ymm2, %ymm0
-; AVX512-NEXT:    vmovaps %ymm3, %ymm0 {%k1}
+; AVX512-NEXT:    vmovaps %ymm4, %ymm0 {%k1}
 ; AVX512-NEXT:    vmovups %ymm0, (%rax)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq

diff  --git a/llvm/test/CodeGen/X86/widen_fadd.ll b/llvm/test/CodeGen/X86/widen_fadd.ll
index 64f78b029f0fd..96ea5a6aff4d1 100644
--- a/llvm/test/CodeGen/X86/widen_fadd.ll
+++ b/llvm/test/CodeGen/X86/widen_fadd.ll
@@ -65,13 +65,44 @@ define void @widen_fadd_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
 ; SSE-NEXT:    movlps %xmm2, 24(%rdx)
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: widen_fadd_v2f32_v8f32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovups (%rdi), %ymm0
-; AVX-NEXT:    vaddps (%rsi), %ymm0, %ymm0
-; AVX-NEXT:    vmovups %ymm0, (%rdx)
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; AVX1OR2-LABEL: widen_fadd_v2f32_v8f32:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
+; AVX1OR2-NEXT:    vaddps (%rsi), %ymm0, %ymm0
+; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
+; AVX1OR2-NEXT:    vzeroupper
+; AVX1OR2-NEXT:    retq
+;
+; AVX512F-LABEL: widen_fadd_v2f32_v8f32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovups (%rdi), %ymm0
+; AVX512F-NEXT:    vaddps (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovups %ymm0, (%rdx)
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: widen_fadd_v2f32_v8f32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT:    vaddps %xmm5, %xmm1, %xmm1
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512VL-NEXT:    vaddps %xmm6, %xmm3, %xmm3
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX512VL-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm3
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vaddps %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512VL-NEXT:    vmovups %ymm0, (%rdx)
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
   %a2 = getelementptr inbounds i8, ptr %a0, i64 8
   %b2 = getelementptr inbounds i8, ptr %b0, i64 8
   %c2 = getelementptr inbounds i8, ptr %c0, i64 8

diff  --git a/llvm/test/CodeGen/X86/widen_fdiv.ll b/llvm/test/CodeGen/X86/widen_fdiv.ll
index 74caee277a08b..e4c9278478a5b 100644
--- a/llvm/test/CodeGen/X86/widen_fdiv.ll
+++ b/llvm/test/CodeGen/X86/widen_fdiv.ll
@@ -65,13 +65,44 @@ define void @widen_fdiv_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
 ; SSE-NEXT:    movlps %xmm3, 24(%rdx)
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: widen_fdiv_v2f32_v8f32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovups (%rdi), %ymm0
-; AVX-NEXT:    vdivps (%rsi), %ymm0, %ymm0
-; AVX-NEXT:    vmovups %ymm0, (%rdx)
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; AVX1OR2-LABEL: widen_fdiv_v2f32_v8f32:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
+; AVX1OR2-NEXT:    vdivps (%rsi), %ymm0, %ymm0
+; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
+; AVX1OR2-NEXT:    vzeroupper
+; AVX1OR2-NEXT:    retq
+;
+; AVX512F-LABEL: widen_fdiv_v2f32_v8f32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovups (%rdi), %ymm0
+; AVX512F-NEXT:    vdivps (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovups %ymm0, (%rdx)
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: widen_fdiv_v2f32_v8f32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT:    vdivps %xmm5, %xmm1, %xmm1
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512VL-NEXT:    vdivps %xmm6, %xmm3, %xmm3
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX512VL-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm3
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vdivps %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512VL-NEXT:    vmovups %ymm0, (%rdx)
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
   %a2 = getelementptr inbounds i8, ptr %a0, i64 8
   %b2 = getelementptr inbounds i8, ptr %b0, i64 8
   %c2 = getelementptr inbounds i8, ptr %c0, i64 8

diff  --git a/llvm/test/CodeGen/X86/widen_fmul.ll b/llvm/test/CodeGen/X86/widen_fmul.ll
index cfe4a07e9c3b3..4c6d5e4001e79 100644
--- a/llvm/test/CodeGen/X86/widen_fmul.ll
+++ b/llvm/test/CodeGen/X86/widen_fmul.ll
@@ -65,13 +65,44 @@ define void @widen_fmul_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
 ; SSE-NEXT:    movlps %xmm2, 24(%rdx)
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: widen_fmul_v2f32_v8f32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovups (%rdi), %ymm0
-; AVX-NEXT:    vmulps (%rsi), %ymm0, %ymm0
-; AVX-NEXT:    vmovups %ymm0, (%rdx)
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; AVX1OR2-LABEL: widen_fmul_v2f32_v8f32:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
+; AVX1OR2-NEXT:    vmulps (%rsi), %ymm0, %ymm0
+; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
+; AVX1OR2-NEXT:    vzeroupper
+; AVX1OR2-NEXT:    retq
+;
+; AVX512F-LABEL: widen_fmul_v2f32_v8f32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovups (%rdi), %ymm0
+; AVX512F-NEXT:    vmulps (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovups %ymm0, (%rdx)
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: widen_fmul_v2f32_v8f32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT:    vmulps %xmm5, %xmm1, %xmm1
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512VL-NEXT:    vmulps %xmm6, %xmm3, %xmm3
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX512VL-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm3
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vmulps %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512VL-NEXT:    vmovups %ymm0, (%rdx)
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
   %a2 = getelementptr inbounds i8, ptr %a0, i64 8
   %b2 = getelementptr inbounds i8, ptr %b0, i64 8
   %c2 = getelementptr inbounds i8, ptr %c0, i64 8

diff  --git a/llvm/test/CodeGen/X86/widen_fsub.ll b/llvm/test/CodeGen/X86/widen_fsub.ll
index bb834a4da69f7..91645c3329a53 100644
--- a/llvm/test/CodeGen/X86/widen_fsub.ll
+++ b/llvm/test/CodeGen/X86/widen_fsub.ll
@@ -65,13 +65,44 @@ define void @widen_fsub_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
 ; SSE-NEXT:    movlps %xmm3, 24(%rdx)
 ; SSE-NEXT:    retq
 ;
-; AVX-LABEL: widen_fsub_v2f32_v8f32:
-; AVX:       # %bb.0:
-; AVX-NEXT:    vmovups (%rdi), %ymm0
-; AVX-NEXT:    vsubps (%rsi), %ymm0, %ymm0
-; AVX-NEXT:    vmovups %ymm0, (%rdx)
-; AVX-NEXT:    vzeroupper
-; AVX-NEXT:    retq
+; AVX1OR2-LABEL: widen_fsub_v2f32_v8f32:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
+; AVX1OR2-NEXT:    vsubps (%rsi), %ymm0, %ymm0
+; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
+; AVX1OR2-NEXT:    vzeroupper
+; AVX1OR2-NEXT:    retq
+;
+; AVX512F-LABEL: widen_fsub_v2f32_v8f32:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovups (%rdi), %ymm0
+; AVX512F-NEXT:    vsubps (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT:    vmovups %ymm0, (%rdx)
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: widen_fsub_v2f32_v8f32:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT:    vsubps %xmm5, %xmm1, %xmm1
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
+; AVX512VL-NEXT:    vsubps %xmm6, %xmm3, %xmm3
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX512VL-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm3
+; AVX512VL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vsubps %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX512VL-NEXT:    vmovups %ymm0, (%rdx)
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
   %a2 = getelementptr inbounds i8, ptr %a0, i64 8
   %b2 = getelementptr inbounds i8, ptr %b0, i64 8
   %c2 = getelementptr inbounds i8, ptr %c0, i64 8

diff  --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll
index c1c4fca0a6fa6..3d9d30e107db5 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg.ll
@@ -6123,13 +6123,13 @@ define void @vec512_v64i8_to_v4i128_factor16(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX512F-FAST:       # %bb.0:
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512F-FAST-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
-; AVX512F-FAST-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,u,u,u,u,u,u,u,3,u,u,u,u,u,u,u]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <8,u,9,u,0,u,1,u>
-; AVX512F-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
-; AVX512F-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512F-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vpandq %zmm0, %zmm2, %zmm0
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[2,u,u,u,u,u,u,u,3,u,u,u,u,u,u,u]
+; AVX512F-FAST-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,1,1,3,4,5,5,7]
+; AVX512F-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512F-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512F-FAST-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512F-FAST-NEXT:    vpaddb 32(%rdx), %ymm1, %ymm1
 ; AVX512F-FAST-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
@@ -6159,13 +6159,13 @@ define void @vec512_v64i8_to_v4i128_factor16(ptr %in.vec.base.ptr, ptr %in.vec.b
 ; AVX512BW-FAST:       # %bb.0:
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm0
 ; AVX512BW-FAST-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2,u,u,u,u,u,u,u,3,u,u,u,u,u,u,u]
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <8,u,9,u,0,u,1,u>
-; AVX512BW-FAST-NEXT:    vpermi2q %zmm1, %zmm0, %zmm2
-; AVX512BW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm0 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512BW-FAST-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
-; AVX512BW-FAST-NEXT:    vpandq %zmm0, %zmm2, %zmm0
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm0[2,u,u,u,u,u,u,u,3,u,u,u,u,u,u,u]
+; AVX512BW-FAST-NEXT:    vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[0,1,1,3,4,5,5,7]
+; AVX512BW-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} zmm1 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVX512BW-FAST-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
+; AVX512BW-FAST-NEXT:    vpandq %zmm1, %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper

diff  --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
index 15f6a0c0b4d8d..bcabcd9da92ab 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
@@ -2031,9 +2031,9 @@ define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in.
 ; AVX512BW-FAST-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4:
 ; AVX512BW-FAST:       # %bb.0:
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,9,0,11,0,13,0,15]
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,25,0,27,0,29,0,31]
 ; AVX512BW-FAST-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vpermd %zmm0, %zmm1, %zmm0
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm1, %zmm0
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
@@ -2575,10 +2575,10 @@ define void @vec384_i8_widen_to_i24_factor3_broadcast_to_v16i24_factor16(ptr %in
 ; AVX2-NEXT:    vpaddb 32(%rsi), %ymm0, %ymm0
 ; AVX2-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX2-NEXT:    vpaddb (%rsi), %xmm1, %xmm1
-; AVX2-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero
-; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[0,1]
-; AVX2-NEXT:    vpbroadcastb %xmm1, %xmm3
-; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX2-NEXT:    vpbroadcastb %xmm1, %xmm2
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0]
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm0, %ymm2, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm1 = zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero
@@ -2870,10 +2870,10 @@ define void @vec384_i8_widen_to_i48_factor6_broadcast_to_v8i48_factor8(ptr %in.v
 ; AVX2-NEXT:    vpaddb 32(%rsi), %ymm0, %ymm0
 ; AVX2-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX2-NEXT:    vpaddb (%rsi), %xmm1, %xmm1
-; AVX2-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm1[0],zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,xmm1[0],zero
-; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[0,1]
-; AVX2-NEXT:    vpbroadcastb %xmm1, %xmm3
-; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX2-NEXT:    vpbroadcastb %xmm1, %xmm2
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,xmm1[0],zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,xmm1[0],zero
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255]
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm0, %ymm2, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero
@@ -3164,10 +3164,10 @@ define void @vec384_i8_widen_to_i96_factor12_broadcast_to_v4i96_factor4(ptr %in.
 ; AVX2-NEXT:    vpaddb 32(%rsi), %ymm0, %ymm0
 ; AVX2-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX2-NEXT:    vpaddb (%rsi), %xmm1, %xmm1
-; AVX2-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero
-; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[0,1]
-; AVX2-NEXT:    vpbroadcastb %xmm1, %xmm3
-; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX2-NEXT:    vpbroadcastb %xmm1, %xmm2
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255]
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm0, %ymm2, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
@@ -3774,18 +3774,17 @@ define void @vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8(ptr %in.
 ; AVX512F-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm1
 ; AVX512F-SLOW-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
 ; AVX512F-SLOW-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpbroadcastw %xmm0, %xmm2
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
 ; AVX512F-SLOW-NEXT:    vpbroadcastw %xmm0, %ymm0
-; AVX512F-SLOW-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6],ymm0[7],ymm3[8],ymm0[9],ymm3[10,11],ymm0[12],ymm3[13,14],ymm0[15]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
-; AVX512F-SLOW-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
-; AVX512F-SLOW-NEXT:    vpaddb 32(%rdx), %ymm1, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, 32(%rcx)
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, (%rcx)
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
+; AVX512F-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7],ymm2[8],ymm0[9],ymm2[10,11],ymm0[12],ymm2[13,14],ymm0[15]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
+; AVX512F-SLOW-NEXT:    vpaddb (%rdx), %ymm1, %ymm1
+; AVX512F-SLOW-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, 32(%rcx)
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, (%rcx)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
@@ -3814,18 +3813,17 @@ define void @vec384_i16_widen_to_i48_factor3_broadcast_to_v8i48_factor8(ptr %in.
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpbroadcastw %xmm0, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3],xmm1[4,5],xmm2[6],xmm1[7]
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastw %xmm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2,3],ymm0[4],ymm3[5,6],ymm0[7],ymm3[8],ymm0[9],ymm3[10,11],ymm0[12],ymm3[13,14],ymm0[15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
-; AVX512DQ-SLOW-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpaddb 32(%rdx), %ymm1, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, 32(%rcx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm0, (%rcx)
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7],ymm2[8],ymm0[9],ymm2[10,11],ymm0[12],ymm2[13,14],ymm0[15]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
+; AVX512DQ-SLOW-NEXT:    vpaddb (%rdx), %ymm1, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm0, 32(%rcx)
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, (%rcx)
 ; AVX512DQ-SLOW-NEXT:    vzeroupper
 ; AVX512DQ-SLOW-NEXT:    retq
 ;
@@ -4209,18 +4207,17 @@ define void @vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4(ptr %in.
 ; AVX512F-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm1
 ; AVX512F-SLOW-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
 ; AVX512F-SLOW-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
-; AVX512F-SLOW-NEXT:    vpbroadcastw %xmm0, %xmm2
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3,4,5],xmm2[6],xmm1[7]
 ; AVX512F-SLOW-NEXT:    vpbroadcastw %xmm0, %ymm0
-; AVX512F-SLOW-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4],ymm3[5,6,7,8,9,10,11],ymm0[12],ymm3[13,14,15]
-; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
-; AVX512F-SLOW-NEXT:    vpaddb 32(%rdx), %ymm1, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, 32(%rcx)
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, (%rcx)
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5],xmm0[6],xmm1[7]
+; AVX512F-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4],ymm2[5,6,7,8,9,10,11],ymm0[12],ymm2[13,14,15]
+; AVX512F-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpaddb (%rdx), %ymm1, %ymm1
+; AVX512F-SLOW-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm0, 32(%rcx)
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, (%rcx)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
@@ -4249,18 +4246,17 @@ define void @vec384_i16_widen_to_i96_factor6_broadcast_to_v4i96_factor4(ptr %in.
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 48(%rdi), %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpaddb 48(%rsi), %xmm1, %xmm1
 ; AVX512DQ-SLOW-NEXT:    vpaddb (%rsi), %xmm0, %xmm0
-; AVX512DQ-SLOW-NEXT:    vpbroadcastw %xmm0, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3,4,5],xmm2[6],xmm1[7]
 ; AVX512DQ-SLOW-NEXT:    vpbroadcastw %xmm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4],ymm3[5,6,7,8,9,10,11],ymm0[12],ymm3[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpaddb (%rdx), %ymm0, %ymm0
-; AVX512DQ-SLOW-NEXT:    vpaddb 32(%rdx), %ymm1, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, 32(%rcx)
-; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm0, (%rcx)
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5],xmm0[6],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4],ymm2[5,6,7,8,9,10,11],ymm0[12],ymm2[13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpaddb (%rdx), %ymm1, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpaddb 32(%rdx), %ymm0, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm0, 32(%rcx)
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, (%rcx)
 ; AVX512DQ-SLOW-NEXT:    vzeroupper
 ; AVX512DQ-SLOW-NEXT:    retq
 ;

diff  --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
index f5fd369cb457b..88051de876c09 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -2038,7 +2038,7 @@ define void @vec384_i8_widen_to_i24_factor3_broadcast_to_v16i24_factor16(ptr %in
 ; AVX2-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero
 ; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = mem[2,3],ymm2[0,1]
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = mem[2,3],ymm0[2,3]
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0]
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm1 = zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero,xmm1[0],zero,zero
@@ -2290,7 +2290,7 @@ define void @vec384_i8_widen_to_i48_factor6_broadcast_to_v8i48_factor8(ptr %in.e
 ; AVX2-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm1[0],zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,xmm1[0],zero
 ; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = mem[2,3],ymm2[0,1]
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = mem[2,3],ymm0[2,3]
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255,0,255,255,255,255,255,0,255,255,255,255,255,0,255,255,255]
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero
@@ -2542,7 +2542,7 @@ define void @vec384_i8_widen_to_i96_factor12_broadcast_to_v4i96_factor4(ptr %in.
 ; AVX2-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero
 ; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = mem[2,3],ymm2[0,1]
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm2 = mem[2,3],ymm0[2,3]
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255]
 ; AVX2-NEXT:    vpblendvb %ymm3, %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,xmm1[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero

