[llvm] 3daf2fb - [X86] combineX86ShuffleChainWithExtract - refactor to remove need to widen all vectors

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 7 09:12:52 PST 2023


Author: Simon Pilgrim
Date: 2023-02-07T16:53:19Z
New Revision: 3daf2fb71109d89b354e12eb3f05486b317e548f

URL: https://github.com/llvm/llvm-project/commit/3daf2fb71109d89b354e12eb3f05486b317e548f
DIFF: https://github.com/llvm/llvm-project/commit/3daf2fb71109d89b354e12eb3f05486b317e548f.diff

LOG: [X86] combineX86ShuffleChainWithExtract - refactor to remove need to widen all vectors

combineX86ShuffleChain no longer needs all of the candidate shuffle inputs to have been widened to the root size, but combineX86ShuffleChainWithExtract still assumes that they have been.

This refactor peeks through all inputs' extract_subvector nodes to find the widest legal vector type and widens the shuffle mask accordingly - it no longer bails if any of the inputs can't be widened to the new width.
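
As a rough standalone illustration of those two mask transforms (not part of the patch - plain C++ with std::vector standing in for LLVM's SmallVector, and widenMask / adjustForUpperSubvector as made-up names): the root mask is first widened by Scale, then each input that turns out to be extracted from an upper subvector has its mask block offset by the extract index rescaled into root-element units:

  #include <cstdio>
  #include <vector>

  constexpr int SentinelUndef = -1; // stands in for SM_SentinelUndef

  // Widen a shuffle mask over operands of NumMaskElts elements so that each
  // operand is instead treated as Scale*NumMaskElts elements wide.
  static std::vector<int> widenMask(std::vector<int> Mask,
                                    unsigned NumMaskElts, unsigned Scale) {
    for (int &M : Mask)
      if (M >= 0)
        M = (M % NumMaskElts) + ((M / NumMaskElts) * Scale * NumMaskElts);
    // Pad with undef lanes - only the low NumMaskElts are extracted later.
    Mask.insert(Mask.end(), (Scale - 1) * NumMaskElts, SentinelUndef);
    return Mask;
  }

  // If input I was an extract_subvector at index Idx (already rescaled into
  // root-element units), shift that input's mask block so it addresses the
  // upper part of the wide source instead.
  static void adjustForUpperSubvector(std::vector<int> &WideMask, unsigned I,
                                      unsigned WideNumElts, unsigned Idx) {
    int Lo = I * WideNumElts, Hi = (I + 1) * WideNumElts;
    for (int &M : WideMask)
      if (Lo <= M && M < Hi)
        M += Idx;
  }

  int main() {
    // v2i64 root shuffling <elt 0 of op0, elt 1 of op1>, where both ops are
    // 128-bit extracts of 512-bit sources, so Scale = 4.
    std::vector<int> WideMask = widenMask({0, 3}, 2, 4); // {0,9,U,U,U,U,U,U}
    // op0 was extracted from its wide source at element index 4.
    adjustForUpperSubvector(WideMask, 0, 8, 4);          // {4,9,U,U,U,U,U,U}
    for (int M : WideMask)
      printf("%d ", M);
    printf("\n");
  }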

combineX86ShuffleChainWithExtract now follows a similar process to combineX86ShufflesRecursively, so I think we should eventually be able to get rid of it and just let combineX86ShufflesRecursively handle the shuffle widening as well.

Another step toward removing subvector widening for Issue #45319

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
    llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
    llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
    llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
    llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index ea6a67aace7cd..4c5f0538ee339 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -39733,77 +39733,92 @@ static SDValue combineX86ShuffleChainWithExtract(
 
   EVT RootVT = Root.getValueType();
   unsigned RootSizeInBits = RootVT.getSizeInBits();
+  unsigned RootEltSizeInBits = RootSizeInBits / NumMaskElts;
   assert((RootSizeInBits % NumMaskElts) == 0 && "Unexpected root shuffle mask");
 
-  // Bail if we have any smaller inputs.
-  if (llvm::any_of(Inputs, [RootSizeInBits](SDValue Input) {
-        return Input.getValueSizeInBits() < RootSizeInBits;
-      }))
-    return SDValue();
-
-  SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
-  SmallVector<unsigned, 4> Offsets(NumInputs, 0);
-
-  // Peek through subvectors.
-  // TODO: Support inter-mixed EXTRACT_SUBVECTORs + BITCASTs?
+  // Peek through extract_subvector to find widest legal vector.
+  // TODO: Handle ISD::TRUNCATE
   unsigned WideSizeInBits = RootSizeInBits;
-  for (unsigned i = 0; i != NumInputs; ++i) {
-    SDValue &Src = WideInputs[i];
-    unsigned &Offset = Offsets[i];
-    Src = peekThroughBitcasts(Src);
-    EVT BaseVT = Src.getValueType();
-    while (Src.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
-      Offset += Src.getConstantOperandVal(1);
-      Src = Src.getOperand(0);
-    }
-    WideSizeInBits = std::max(WideSizeInBits,
-                              (unsigned)Src.getValueSizeInBits());
-    assert((Offset % BaseVT.getVectorNumElements()) == 0 &&
-           "Unexpected subvector extraction");
-    Offset /= BaseVT.getVectorNumElements();
-    Offset *= NumMaskElts;
+  for (unsigned I = 0; I != NumInputs; ++I) {
+    SDValue Input = peekThroughBitcasts(Inputs[I]);
+    while (Input.getOpcode() == ISD::EXTRACT_SUBVECTOR)
+      Input = peekThroughBitcasts(Input.getOperand(0));
+    if (DAG.getTargetLoweringInfo().isTypeLegal(Input.getValueType()) &&
+        WideSizeInBits < Input.getValueSizeInBits())
+      WideSizeInBits = Input.getValueSizeInBits();
   }
 
-  // Bail if we're always extracting from the lowest subvectors,
-  // combineX86ShuffleChain should match this for the current width.
-  if (llvm::all_of(Offsets, [](unsigned Offset) { return Offset == 0; }))
-    return SDValue();
-
+  // Bail if we fail to find a source larger than the existing root.
   unsigned Scale = WideSizeInBits / RootSizeInBits;
-  assert((WideSizeInBits % RootSizeInBits) == 0 &&
-         "Unexpected subvector extraction");
-
-  // If the src vector types aren't the same, see if we can extend
-  // them to match each other.
-  // TODO: Support different scalar types?
-  EVT WideSVT = WideInputs[0].getValueType().getScalarType();
-  if (llvm::any_of(WideInputs, [&WideSVT, &DAG](SDValue Op) {
-        return !DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()) ||
-               Op.getValueType().getScalarType() != WideSVT;
-      }))
+  if (WideSizeInBits <= RootSizeInBits ||
+      (WideSizeInBits % RootSizeInBits) != 0)
     return SDValue();
 
   // Create new mask for larger type.
-  for (unsigned i = 1; i != NumInputs; ++i)
-    Offsets[i] += i * Scale * NumMaskElts;
-
   SmallVector<int, 64> WideMask(BaseMask);
   for (int &M : WideMask) {
     if (M < 0)
       continue;
-    M = (M % NumMaskElts) + Offsets[M / NumMaskElts];
+    M = (M % NumMaskElts) + ((M / NumMaskElts) * Scale * NumMaskElts);
   }
   WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);
 
+  // Attempt to peek through inputs and adjust mask when we extract from an
+  // upper subvector.
+  int AdjustedMasks = 0;
+  SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
+  for (unsigned I = 0; I != NumInputs; ++I) {
+    SDValue &Input = WideInputs[I];
+    Input = peekThroughBitcasts(Input);
+    while (Input.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+           Input.getOperand(0).getValueSizeInBits() <= WideSizeInBits) {
+      uint64_t Idx = Input.getConstantOperandVal(1);
+      if (Idx != 0) {
+        ++AdjustedMasks;
+        unsigned InputEltSizeInBits = Input.getScalarValueSizeInBits();
+        Idx = (Idx * InputEltSizeInBits) / RootEltSizeInBits;
+
+        int lo = I * WideMask.size();
+        int hi = (I + 1) * WideMask.size();
+        for (int &M : WideMask)
+          if (lo <= M && M < hi)
+            M += Idx;
+      }
+      Input = peekThroughBitcasts(Input.getOperand(0));
+    }
+  }
+
   // Remove unused/repeated shuffle source ops.
   resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
   assert(!WideInputs.empty() && "Shuffle with no inputs detected");
 
-  if (WideInputs.size() > 2)
-    return SDValue();
+  // Bail if we're always extracting from the lowest subvectors,
+  // combineX86ShuffleChain should match this for the current width, or the
+  // shuffle still references too many inputs.
+  if (AdjustedMasks == 0 || WideInputs.size() > 2)
+    return SDValue();
+
+  // Minor canonicalization of the accumulated shuffle mask to make it easier
+  // to match below. All this does is detect masks with sequential pairs of
+  // elements, and shrink them to the half-width mask. It does this in a loop
+  // so it will reduce the size of the mask to the minimal width mask which
+  // performs an equivalent shuffle.
+  while (WideMask.size() > 1) {
+    SmallVector<int, 64> WidenedMask;
+    if (!canWidenShuffleElements(WideMask, WidenedMask))
+      break;
+    WideMask = std::move(WidenedMask);
+  }
+
+  // Canonicalization of binary shuffle masks to improve pattern matching by
+  // commuting the inputs.
+  if (WideInputs.size() == 2 && canonicalizeShuffleMaskWithCommute(WideMask)) {
+    ShuffleVectorSDNode::commuteMask(WideMask);
+    std::swap(WideInputs[0], WideInputs[1]);
+  }
 
   // Increase depth for every upper subvector we've peeked through.
-  Depth += count_if(Offsets, [](unsigned Offset) { return Offset > 0; });
+  Depth += AdjustedMasks;
 
   // Attempt to combine wider chain.
   // TODO: Can we use a better Root?
@@ -39811,6 +39826,9 @@ static SDValue combineX86ShuffleChainWithExtract(
                              WideInputs.back().getValueSizeInBits()
                          ? WideInputs.front()
                          : WideInputs.back();
+  assert(WideRoot.getValueSizeInBits() == WideSizeInBits &&
+         "WideRootSize mismatch");
+
   if (SDValue WideShuffle =
           combineX86ShuffleChain(WideInputs, WideRoot, WideMask, Depth,
                                  HasVariableMask, AllowVariableCrossLaneMask,
@@ -39819,6 +39837,7 @@ static SDValue combineX86ShuffleChainWithExtract(
         extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
     return DAG.getBitcast(RootVT, WideShuffle);
   }
+
   return SDValue();
 }
 

diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
index c943ba53ae9bc..59170e99521e5 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
@@ -750,15 +750,9 @@ define void @vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4(ptr %in.
 ; AVX512BW-SLOW-LABEL: vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4:
 ; AVX512BW-SLOW:       # %bb.0:
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,9,0,11,0,13,0,15]
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,9,0,11,4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpermw %zmm0, %zmm1, %zmm1
-; AVX512BW-SLOW-NEXT:    vmovd %xmm0, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %eax, %xmm1, %xmm1
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5],xmm1[6,7]
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6],xmm0[7]
+; AVX512BW-SLOW-NEXT:    vpermw %zmm0, %zmm1, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
@@ -767,16 +761,13 @@ define void @vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4(ptr %in.
 ; AVX512BW-FAST-LABEL: vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4:
 ; AVX512BW-FAST:       # %bb.0:
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,9,0,11,0,13,6,7]
 ; AVX512BW-FAST-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,9,0,3,4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vpermw %zmm0, %zmm1, %zmm1
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vmovd %xmm0, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %eax, %xmm1, %xmm0
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm2[5],xmm0[6,7]
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
+; AVX512BW-FAST-NEXT:    vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6],xmm0[7]
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
@@ -877,8 +868,8 @@ define void @vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2(ptr %in.
 ; AVX512BW-SLOW-LABEL: vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2:
 ; AVX512BW-SLOW:       # %bb.0:
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512BW-SLOW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,9,10,11,0,13,6,7]
+; AVX512BW-SLOW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpermw %zmm0, %zmm1, %zmm1
 ; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
@@ -890,8 +881,8 @@ define void @vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2(ptr %in.
 ; AVX512BW-FAST-LABEL: vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2:
 ; AVX512BW-FAST:       # %bb.0:
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512BW-FAST-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,9,10,11,0,5,6,7]
+; AVX512BW-FAST-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vpermw %zmm0, %zmm1, %zmm1
 ; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5,6,7]
@@ -2040,11 +2031,10 @@ define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in.
 ; AVX512BW-FAST-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4:
 ; AVX512BW-FAST:       # %bb.0:
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,25,0,27,0,29,0,31]
 ; AVX512BW-FAST-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,9,0,11,0,13,0,15]
-; AVX512BW-FAST-NEXT:    vpermi2d %ymm1, %ymm0, %ymm2
-; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm2, %zmm0
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm1, %zmm0
+; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
 ; AVX512BW-FAST-NEXT:    retq

diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
index 7f838ec95d645..742a5fb649931 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -643,14 +643,8 @@ define void @vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4(ptr %in.
 ;
 ; AVX512BW-SLOW-LABEL: vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4:
 ; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,9,0,11,4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,9,0,11,0,13,0,15]
 ; AVX512BW-SLOW-NEXT:    vpermw (%rdi), %zmm0, %zmm0
-; AVX512BW-SLOW-NEXT:    movl (%rdi), %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5],xmm0[6,7]
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rdx)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
@@ -658,15 +652,10 @@ define void @vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4(ptr %in.
 ;
 ; AVX512BW-FAST-LABEL: vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4:
 ; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,9,0,3,4,5,6,7]
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,9,0,11,0,13,6,7]
 ; AVX512BW-FAST-NEXT:    vpermw (%rdi), %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512BW-FAST-NEXT:    movl (%rdi), %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5],xmm0[6,7]
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
+; AVX512BW-FAST-NEXT:    vpinsrw $6, (%rdi), %xmm0, %xmm0
+; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],mem[7]
 ; AVX512BW-FAST-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rdx)
 ; AVX512BW-FAST-NEXT:    vzeroupper

diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
index 2d4c2df704173..26bdbeb77ccd4 100644
--- a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
+++ b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
@@ -2691,26 +2691,40 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask7(ptr %vp, <4 x i64>
 }
 
 define <2 x i64> @test_8xi64_to_2xi64_perm_mem_mask0(ptr %vp) {
-; CHECK-LABEL: test_8xi64_to_2xi64_perm_mem_mask0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovaps {{.*#+}} xmm0 = [4,1]
-; CHECK-NEXT:    vpermpd (%rdi), %zmm0, %zmm0
-; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; CHECK-FAST-LABEL: test_8xi64_to_2xi64_perm_mem_mask0:
+; CHECK-FAST:       # %bb.0:
+; CHECK-FAST-NEXT:    vmovaps {{.*#+}} xmm0 = [4,1]
+; CHECK-FAST-NEXT:    vpermpd (%rdi), %zmm0, %zmm0
+; CHECK-FAST-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; CHECK-FAST-NEXT:    vzeroupper
+; CHECK-FAST-NEXT:    retq
+;
+; CHECK-FAST-PERLANE-LABEL: test_8xi64_to_2xi64_perm_mem_mask0:
+; CHECK-FAST-PERLANE:       # %bb.0:
+; CHECK-FAST-PERLANE-NEXT:    vmovaps 32(%rdi), %xmm0
+; CHECK-FAST-PERLANE-NEXT:    vblendps $12, (%rdi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[2,3]
+; CHECK-FAST-PERLANE-NEXT:    retq
   %vec = load <8 x i64>, ptr %vp
   %res = shufflevector <8 x i64> %vec, <8 x i64> undef, <2 x i32> <i32 4, i32 1>
   ret <2 x i64> %res
 }
 define <2 x i64> @test_masked_8xi64_to_2xi64_perm_mem_mask0(ptr %vp, <2 x i64> %vec2, <2 x i64> %mask) {
-; CHECK-LABEL: test_masked_8xi64_to_2xi64_perm_mem_mask0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovdqa {{.*#+}} xmm2 = [4,1]
-; CHECK-NEXT:    vpermq (%rdi), %zmm2, %zmm2
-; CHECK-NEXT:    vptestnmq %xmm1, %xmm1, %k1
-; CHECK-NEXT:    vmovdqa64 %xmm2, %xmm0 {%k1}
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; CHECK-FAST-LABEL: test_masked_8xi64_to_2xi64_perm_mem_mask0:
+; CHECK-FAST:       # %bb.0:
+; CHECK-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [4,1]
+; CHECK-FAST-NEXT:    vpermq (%rdi), %zmm2, %zmm2
+; CHECK-FAST-NEXT:    vptestnmq %xmm1, %xmm1, %k1
+; CHECK-FAST-NEXT:    vmovdqa64 %xmm2, %xmm0 {%k1}
+; CHECK-FAST-NEXT:    vzeroupper
+; CHECK-FAST-NEXT:    retq
+;
+; CHECK-FAST-PERLANE-LABEL: test_masked_8xi64_to_2xi64_perm_mem_mask0:
+; CHECK-FAST-PERLANE:       # %bb.0:
+; CHECK-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %xmm2
+; CHECK-FAST-PERLANE-NEXT:    vpblendd $12, (%rdi), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[2,3]
+; CHECK-FAST-PERLANE-NEXT:    vptestnmq %xmm1, %xmm1, %k1
+; CHECK-FAST-PERLANE-NEXT:    vmovdqa64 %xmm2, %xmm0 {%k1}
+; CHECK-FAST-PERLANE-NEXT:    retq
   %vec = load <8 x i64>, ptr %vp
   %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <2 x i32> <i32 4, i32 1>
   %cmp = icmp eq <2 x i64> %mask, zeroinitializer
@@ -2719,14 +2733,22 @@ define <2 x i64> @test_masked_8xi64_to_2xi64_perm_mem_mask0(ptr %vp, <2 x i64> %
 }
 
 define <2 x i64> @test_masked_z_8xi64_to_2xi64_perm_mem_mask0(ptr %vp, <2 x i64> %mask) {
-; CHECK-LABEL: test_masked_z_8xi64_to_2xi64_perm_mem_mask0:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,1]
-; CHECK-NEXT:    vptestnmq %xmm0, %xmm0, %k1
-; CHECK-NEXT:    vpermq (%rdi), %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
-; CHECK-NEXT:    vzeroupper
-; CHECK-NEXT:    retq
+; CHECK-FAST-LABEL: test_masked_z_8xi64_to_2xi64_perm_mem_mask0:
+; CHECK-FAST:       # %bb.0:
+; CHECK-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,1]
+; CHECK-FAST-NEXT:    vptestnmq %xmm0, %xmm0, %k1
+; CHECK-FAST-NEXT:    vpermq (%rdi), %zmm1, %zmm0 {%k1} {z}
+; CHECK-FAST-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
+; CHECK-FAST-NEXT:    vzeroupper
+; CHECK-FAST-NEXT:    retq
+;
+; CHECK-FAST-PERLANE-LABEL: test_masked_z_8xi64_to_2xi64_perm_mem_mask0:
+; CHECK-FAST-PERLANE:       # %bb.0:
+; CHECK-FAST-PERLANE-NEXT:    vmovdqa 32(%rdi), %xmm1
+; CHECK-FAST-PERLANE-NEXT:    vpblendd $12, (%rdi), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[2,3]
+; CHECK-FAST-PERLANE-NEXT:    vptestnmq %xmm0, %xmm0, %k1
+; CHECK-FAST-PERLANE-NEXT:    vmovdqa64 %xmm1, %xmm0 {%k1} {z}
+; CHECK-FAST-PERLANE-NEXT:    retq
   %vec = load <8 x i64>, ptr %vp
   %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <2 x i32> <i32 4, i32 1>
   %cmp = icmp eq <2 x i64> %mask, zeroinitializer

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
index 6ad3c95d376fe..62102bd955374 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-7.ll
@@ -10559,380 +10559,376 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ;
 ; AVX512BW-ONLY-SLOW-LABEL: load_i8_stride7_vf64:
 ; AVX512BW-ONLY-SLOW:       # %bb.0:
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm24
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm22
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm23
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm2
 ; AVX512BW-ONLY-SLOW-NEXT:    movw $-28382, %ax # imm = 0x9122
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm1, %ymm24, %ymm0 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    kmovq %k1, %k2
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k6
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm1, %ymm22, %ymm0 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[5,12],zero,zero,xmm4[1,8,15,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm5, %xmm0, %xmm22
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm4, %xmm0, %xmm16
 ; AVX512BW-ONLY-SLOW-NEXT:    movw $9288, %ax # imm = 0x2448
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k5
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm23, %ymm3, %ymm0 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7,8,9],ymm5[10],ymm0[11,12],ymm5[13],ymm0[14,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm7, %ymm2, %ymm0 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7,8,9],ymm4[10],ymm0[11,12],ymm4[13],ymm0[14,15]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm0, %ymm22 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm0, %ymm16 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm9
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm6
 ; AVX512BW-ONLY-SLOW-NEXT:    movw $8772, %ax # imm = 0x2244
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k7
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm8, %ymm7, %ymm0 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[6,13,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k3
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm9, %ymm6, %ymm0 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero,xmm0[u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm0, %xmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm11
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm8
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm18, %xmm11, %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm13
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm13[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm5[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm5[5,12,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm6
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm6[0,7,14],zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm9, %xmm10, %xmm9
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm9, %zmm0, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm18, %xmm8, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm10
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm10[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm4[7]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 240(%rdi), %xmm24
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm24[5,12,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 224(%rdi), %xmm25
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm25[0,7,14],zero,zero,xmm25[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm11, %xmm12, %xmm11
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm11, %zmm0, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $137438429184, %rax # imm = 0x1FFFF80000
-; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm22 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm10
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm9
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm10, %ymm9, %ymm0 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm16 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm14
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm13
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm14, %ymm13, %ymm0 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm0[u,u,u,u,u,3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u],zero,zero,xmm0[1,8,15],zero,zero,xmm0[4,11,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm12, %xmm0, %xmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm14
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm15
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm14, %ymm15, %ymm0 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm0[2,3,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm12[2],ymm0[3,4,5],ymm12[6],ymm0[7,8,9],ymm12[10],ymm0[11,12,13],ymm12[14],ymm0[15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm11, %xmm0, %xmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 352(%rdi), %ymm17
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm0
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm17, %ymm0, %ymm11 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm11[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm12[2],ymm11[3,4,5],ymm12[6],ymm11[7,8,9],ymm12[10],ymm11[11,12,13],ymm12[14],ymm11[15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    movw $3968, %ax # imm = 0xF80
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    kmovw %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm0, %ymm20 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 416(%rdi), %ymm16
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm12
+; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm11, %ymm20 {%k7}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 416(%rdi), %ymm15
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm11
 ; AVX512BW-ONLY-SLOW-NEXT:    movw $4644, %ax # imm = 0x1224
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k4
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm16, %ymm12, %ymm0 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm0, %xmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,u,u,u],zero,zero,zero,xmm17[6,13],zero,zero,xmm17[2,9]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm17, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm0, %ymm0, %ymm21
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm15, %ymm11, %ymm12 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm12, %xmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm19[u,u,u,u,u,u,u],zero,zero,zero,xmm19[6,13],zero,zero,xmm19[2,9]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm12[4,11],zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm19, %xmm12, %xmm12
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm12, %ymm0, %ymm21
 ; AVX512BW-ONLY-SLOW-NEXT:    movl $-8388608, %eax # imm = 0xFF800000
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm1, %ymm24, %ymm0 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm0, %xmm17
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = zero,zero,zero,xmm17[6,13],zero,zero,xmm17[2,9,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm17, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm3, %ymm23, %ymm2 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4,5],ymm4[6],ymm2[7,8,9],ymm4[10],ymm2[11,12,13],ymm4[14],ymm2[15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm17 = ymm2[u,u,u,u,u,u,u,u,u,0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm1, %ymm22, %ymm12 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm12, %xmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = zero,zero,zero,xmm19[6,13],zero,zero,xmm19[2,9,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[1,8,15],zero,zero,xmm12[4,11],zero,zero,xmm12[u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm19, %xmm12, %xmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm2, %ymm7, %ymm12 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    kmovw %k6, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm12[0,1],ymm3[2],ymm12[3,4,5],ymm3[6],ymm12[7,8,9],ymm3[10],ymm12[11,12,13],ymm3[14],ymm12[15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm3[u,u,u,u,u,u,u,u,u,0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    movl $511, %edi # imm = 0x1FF
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %edi, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm17 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm8, %ymm7, %ymm0 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero,xmm0[u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm13[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm11[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm2[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm5[6,13,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[1,8,15],zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm4, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm17 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm3, %ymm23, %ymm0 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7,8,9,10],ymm2[11],ymm0[12,13],ymm2[14],ymm0[15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm1, %ymm24, %ymm2 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[2,9],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm2, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    movl $261632, %edi # imm = 0x3FE00
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %edi, %k6
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 {%k6} = ymm0[u,u,u,u,u,u,u,u,u,1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm7, %ymm8, %ymm0 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,zero,xmm4[5,12],zero,zero,xmm4[1,8,15,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm18, %xmm13, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm18 = xmm11[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm18[0],xmm4[0],xmm18[1],xmm4[1],xmm18[2],xmm4[2],xmm18[3],xmm4[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm4[7]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[2,9],zero,zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm18 = zero,zero,xmm5[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm4, %xmm18, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm0, %zmm18
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm2, %zmm18 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm3, %ymm23, %ymm0 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5,6],ymm2[7,8],ymm0[9,10],ymm2[11],ymm0[12,13,14],ymm2[15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm1, %ymm24, %ymm2 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[3,10],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm2, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 {%k6} = ymm0[u,u,u,u,u,u,u,u,u,2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm7, %ymm8, %ymm0 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,zero,xmm4[6,13],zero,zero,xmm4[2,9,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm13[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm13[5,12]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm11[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm19, %ymm12 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm9, %ymm6, %ymm3 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm3[u,u,u,6,13],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm19, %xmm3, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm10[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm8[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm19 = xmm23[0],xmm19[0],xmm23[1],xmm19[1],xmm23[2],xmm19[2],xmm23[3],xmm19[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm19, %ymm0, %ymm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm4[7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm24[6,13,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm25[1,8,15],zero,zero,xmm25[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm4, %xmm19, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512BW-ONLY-SLOW-NEXT:    movl $-134217728, %edi # imm = 0xF8000000
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm3, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm3, %zmm12 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm2, %ymm7, %ymm3 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8,9,10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm1, %ymm22, %ymm4 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm4[2,9],zero,zero,zero,xmm4[5,12],zero,zero,xmm4[u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm19, %xmm4, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    movl $261632, %edi # imm = 0x3FE00
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %edi, %k2
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 {%k2} = ymm3[u,u,u,u,u,u,u,u,u,1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm4, %ymm0 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[3,10],zero,zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = zero,zero,xmm5[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm4, %xmm19, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm0, %zmm19
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm2, %zmm19 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm6, %ymm9, %ymm3 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm3, %xmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm19[u,u],zero,zero,zero,xmm19[5,12],zero,zero,xmm19[1,8,15,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,0,7,14],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm19, %xmm3, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm18, %xmm10, %xmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm8[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm18 = xmm19[0],xmm18[0],xmm19[1],xmm18[1],xmm19[2],xmm18[2],xmm19[3],xmm18[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm18, %ymm0, %ymm5
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm5[7]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm25[2,9],zero,zero,zero,xmm25[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm18 = zero,zero,xmm24[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm5, %xmm18, %xmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm5, %zmm3, %zmm18
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm4, %zmm18 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm2, %ymm7, %ymm3 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1,2],ymm4[3],ymm3[4,5,6],ymm4[7,8],ymm3[9,10],ymm4[11],ymm3[12,13,14],ymm4[15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm1, %ymm22, %ymm4 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm4[3,10],zero,zero,zero,xmm4[6,13],zero,zero,xmm4[u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[1,8,15],zero,zero,xmm4[4,11,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 {%k2} = ymm3[u,u,u,u,u,u,u,u,u,2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm6, %ymm9, %ymm3 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u],zero,zero,zero,xmm5[6,13],zero,zero,xmm5[2,9,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,1,8,15],zero,zero,xmm3[4,11],zero,zero,xmm3[u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm3, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm10[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm10[5,12]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm8[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm5, %xmm19, %xmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
+; AVX512BW-ONLY-SLOW-NEXT:    movl $-134217728, %edi # imm = 0xF8000000
+; AVX512BW-ONLY-SLOW-NEXT:    kmovd %edi, %k6
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm5, %ymm3 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm25[3,10],zero,zero,zero,xmm25[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = zero,zero,xmm24[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm5, %xmm19, %xmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $2, %xmm5, %zmm3, %zmm19
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm4, %zmm19 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k2
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm21, %ymm20 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm20, %zmm0, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm20, %zmm0, %zmm3
 ; AVX512BW-ONLY-SLOW-NEXT:    movabsq $-137438953472, %rax # imm = 0xFFFFFFE000000000
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovq %rax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm22 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm9, %ymm10, %ymm0 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm14, %ymm15, %ymm2 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3],ymm2[4,5],ymm4[6],ymm2[7,8,9,10],ymm4[11],ymm2[12,13],ymm4[14],ymm2[15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k3 # 2-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm2, %ymm0 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm16, %ymm12, %ymm2 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm2[5,12],zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm2, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm2, %ymm0 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm17 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm9, %ymm10, %ymm0 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[6,13,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero,xmm0[u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm15, %ymm14, %ymm2 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2],ymm4[3],ymm2[4,5,6],ymm4[7,8],ymm2[9,10],ymm4[11],ymm2[12,13,14],ymm4[15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm2, %ymm0 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm16, %ymm12, %ymm2 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm2[6,13],zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm2, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm2, %ymm0 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm18 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm9, %ymm10, %ymm0 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero,xmm0[u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm15, %ymm14, %ymm2 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2,3],ymm4[4],ymm2[5,6],ymm4[7,8],ymm2[9,10,11],ymm4[12],ymm2[13,14],ymm4[15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm2, %ymm0 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm12, %ymm16, %ymm2 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u],zero,zero,xmm4[2,9],zero,zero,zero,xmm4[5,12]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,4,11],zero,zero,xmm2[0,7,14],zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm2, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm2, %ymm0 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm19 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm3, %zmm16 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm13, %ymm14, %ymm3 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u],zero,zero,xmm4[2,9],zero,zero,zero,xmm4[5,12,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,4,11],zero,zero,xmm3[0,7,14],zero,zero,xmm3[u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm17, %ymm0, %ymm4 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7,8,9,10],ymm5[11],ymm4[12,13],ymm5[14],ymm4[15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm4, %ymm3 {%k7}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm15, %ymm11, %ymm4 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm4[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm4[5,12],zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u],zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm4, %ymm3 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm3, %zmm12 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm13, %ymm14, %ymm3 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,5,12],zero,zero,xmm3[1,8,15],zero,zero,xmm3[u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm0, %ymm17, %ymm4 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5,6],ymm5[7,8],ymm4[9,10],ymm5[11],ymm4[12,13,14],ymm5[15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm4, %ymm3 {%k7}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm15, %ymm11, %ymm4 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm4[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm4[6,13],zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u],zero,zero,xmm4[1,8,15],zero,zero,xmm4[4,11]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm4, %ymm3 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm3, %zmm18 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm13, %ymm14, %ymm3 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,6,13],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u],zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm0, %ymm17, %ymm4 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1,2,3],ymm5[4],ymm4[5,6],ymm5[7,8],ymm4[9,10,11],ymm5[12],ymm4[13,14],ymm5[15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm4, %ymm3 {%k7}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm11, %ymm15, %ymm4 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u],zero,zero,xmm5[2,9],zero,zero,zero,xmm5[5,12]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u,4,11],zero,zero,xmm4[0,7,14],zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm4, %ymm3 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %zmm3, %zmm19 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm10, %ymm9, %ymm0 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm2, %xmm0, %xmm20
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm15, %ymm14, %ymm0 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6,7,8],ymm2[9],ymm0[10,11],ymm2[12],ymm0[13,14,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm0, %ymm20 {%k3}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm10, %ymm9, %ymm0 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[2,9,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm14, %ymm15, %ymm2 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    kmovq %k1, %k3
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3,4],ymm4[5],ymm2[6,7,8],ymm4[9],ymm2[10,11,12],ymm4[13],ymm2[14,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm21 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm14, %ymm13, %ymm3 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u],zero,zero,zero,xmm4[5,12],zero,zero,xmm4[1,8,15,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,0,7,14],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm4, %xmm3, %xmm20
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm0, %ymm17, %ymm3 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6,7,8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm3, %ymm20 {%k7}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm14, %ymm13, %ymm3 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u],zero,zero,zero,xmm4[6,13],zero,zero,xmm4[2,9,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,1,8,15],zero,zero,xmm3[4,11],zero,zero,xmm3[u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm17, %ymm0, %ymm4 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    kmovq %k1, %k7
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3,4],ymm5[5],ymm4[6,7,8],ymm5[9],ymm4[10,11,12],ymm5[13],ymm4[14,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm21 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    movl $8176, %eax # imm = 0x1FF0
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm21 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm12, %ymm16, %ymm0 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[6,13]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm20 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm12, %ymm16, %ymm0 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[u,u,u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm21 {%k2}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm3, %ymm23, %ymm0 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm24, %ymm1, %ymm2 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm24, %ymm1, %ymm4 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm7, %ymm8, %ymm25 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm24, %ymm1 {%k5}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm7, %ymm8, %ymm24 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm26 = xmm24[u,u,2,9],zero,zero,zero,xmm24[5,12],zero,zero,xmm24[u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm24, %xmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm24 = xmm24[u,u],zero,zero,xmm24[0,7,14],zero,zero,xmm24[3,10,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm26, %xmm24, %xmm24
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm24, %ymm0, %ymm24
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm26 = xmm13[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm13[6,13]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm27 = xmm11[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm26, %xmm27, %xmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm26, %ymm0, %ymm26
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 4-byte Reload
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm26, %ymm24 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm26 = xmm25[u,u,3,10],zero,zero,zero,xmm25[6,13],zero,zero,xmm25[u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti32x4 $1, %ymm25, %xmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm25 = xmm25[u,u],zero,zero,xmm25[1,8,15],zero,zero,xmm25[4,11,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm26, %xmm25, %xmm25
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm26 = xmm11[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm27 = xmm13[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm13[0,7,14]
-; AVX512BW-ONLY-SLOW-NEXT:    vporq %xmm26, %xmm27, %xmm26
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm25, %ymm0, %ymm25
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti32x4 $1, %xmm26, %ymm0, %ymm26
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm26, %ymm25 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm8, %ymm7 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u],zero,zero,xmm8[2,9],zero,zero,zero,xmm8[5,12,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,4,11],zero,zero,xmm7[0,7,14],zero,zero,xmm7[u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm7, %xmm8, %xmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm3, %ymm21 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm11, %ymm15, %ymm3 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,5,12],zero,zero,xmm3[1,8,15],zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm3, %ymm20 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm11, %ymm15, %ymm3 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,u,6,13],zero,zero,xmm3[2,9],zero,zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm3, %ymm21 {%k2}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm17, %ymm0 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm0[2,3,0,1]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7,8,9],ymm3[10],ymm0[11,12],ymm3[13],ymm0[14,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm14, %ymm13 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm13[u,u,u,u,2,9],zero,zero,zero,xmm13[5,12],zero,zero,xmm13[u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u],zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm0, %xmm4, %xmm0
+; AVX512BW-ONLY-SLOW-NEXT:    movl $4186112, %eax # imm = 0x3FE000
+; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 {%k1} = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,1,8,15,22,29,20,27,18,25,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm15, %ymm11 {%k7}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm2, %ymm7, %ymm14 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm6, %ymm9, %ymm3 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm7, %ymm2, %ymm13 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm22, %ymm1, %ymm4 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm7, %ymm2 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm22, %ymm1, %ymm5 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm6, %ymm9, %ymm7 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm22, %ymm1 {%k5}
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm9, %ymm6 {%k4}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm3[u,u,2,9],zero,zero,zero,xmm3[5,12],zero,zero,xmm3[u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,xmm3[0,7,14],zero,zero,xmm3[3,10,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm9, %xmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm10[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm10[6,13]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm8[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm9, %xmm15, %xmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm9, %ymm3 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm7[u,u,3,10],zero,zero,zero,xmm7[6,13],zero,zero,xmm7[u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u],zero,zero,xmm7[1,8,15],zero,zero,xmm7[4,11,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm7, %xmm9, %xmm7
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm11[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm13[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm13[1,8,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm8, %xmm11, %xmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm8[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm10[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm10[0,7,14]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm9, %xmm15, %xmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm9, %ymm7 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u],zero,zero,xmm9[2,9],zero,zero,zero,xmm9[5,12,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,4,11],zero,zero,xmm6[0,7,14],zero,zero,xmm6[u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm6, %xmm9, %xmm6
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm10[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm10[1,8,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm8, %xmm9, %xmm8
 ; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm8, %ymm7 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0],ymm0[1,2,3],ymm8[4],ymm0[5,6],ymm8[7,8],ymm0[9,10,11],ymm8[12],ymm0[13,14],ymm8[15]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[2,9],zero,zero,zero,xmm8[5,12,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,11],zero,zero,xmm2[0,7,14],zero,zero,xmm2[u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm8, %xmm2
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 {%k6} = ymm0[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm5[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm11[0],xmm8[1],xmm11[1],xmm8[2],xmm11[2],xmm8[3],xmm11[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm24, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm8, %zmm2 {%k6}
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendmw %ymm23, %ymm3, %ymm8 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm8, %xmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm11[1],ymm8[2,3],ymm11[4],ymm8[5,6,7,8],ymm11[9],ymm8[10,11],ymm11[12],ymm8[13,14,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm11
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[3,10],zero,zero,zero,xmm11[6,13,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu8 %ymm8, %ymm6 {%k6}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm8
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm14[1,2,3],ymm8[4],ymm14[5,6],ymm8[7,8],ymm14[9,10,11],ymm8[12],ymm14[13,14],ymm8[15]
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,xmm9[2,9],zero,zero,zero,xmm9[5,12,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,11],zero,zero,xmm5[0,7,14],zero,zero,xmm5[u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm5, %xmm9, %xmm5
+; AVX512BW-ONLY-SLOW-NEXT:    kmovd {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 4-byte Reload
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 {%k1} = ymm8[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm25, %xmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm24[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm3, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm3, %zmm5 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm20, %zmm0, %zmm3
+; AVX512BW-ONLY-SLOW-NEXT:    movw $-512, %ax # imm = 0xFE00
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm9
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm13[0],ymm9[1],ymm13[2,3],ymm9[4],ymm13[5,6,7,8],ymm9[9],ymm13[10,11],ymm9[12],ymm13[13,14,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm10
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = zero,zero,xmm10[3,10],zero,zero,zero,xmm10[6,13,u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[5,12],zero,zero,xmm4[1,8,15],zero,zero,xmm4[u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm11, %xmm4
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 {%k6} = ymm8[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm5[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm6[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm11[0],xmm8[0],xmm11[1],xmm8[1],xmm11[2],xmm8[2],xmm11[3],xmm8[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm25, %zmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm8, %zmm4 {%k6}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm23, %ymm3 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm8
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm8[1],ymm3[2,3,4],ymm8[5],ymm3[6,7,8],ymm8[9],ymm3[10,11,12],ymm8[13],ymm3[14,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm1[6,13],zero,zero,xmm1[2,9],zero,zero,zero,xmm1[u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm4, %xmm10, %xmm4
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 {%k1} = ymm9[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm24[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm25[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm7, %zmm4 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm7
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm7[1],ymm2[2,3,4],ymm7[5],ymm2[6,7,8],ymm7[9],ymm2[10,11,12],ymm7[13],ymm2[14,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm1[6,13],zero,zero,xmm1[2,9],zero,zero,zero,xmm1[u,u,u,u,u,u,u]
 ; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[4,11],zero,zero,xmm1[0,7,14,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm1, %xmm8, %xmm1
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 {%k6} = ymm3[u,u,u,u,u,u,u,u,u,5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm6[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm7, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k6}
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm14, %ymm15 {%k4}
-; AVX512BW-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm15[2,3,0,1]
-; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm15[0,1],ymm0[2],ymm15[3,4],ymm0[5],ymm15[6,7,8,9],ymm0[10],ymm15[11,12],ymm0[13],ymm15[14,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm10, %ymm9 {%k7}
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm9[u,u,u,u,2,9],zero,zero,zero,xmm9[5,12],zero,zero,xmm9[u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u],zero,zero,xmm5[0,7,14],zero,zero,xmm5[3,10,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm5, %xmm3
-; AVX512BW-ONLY-SLOW-NEXT:    movl $4186112, %eax # imm = 0x3FE000
-; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 {%k1} = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,1,8,15,22,29,20,27,18,25,u,u,u,u,u,u,u,u,u,u]
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm20, %zmm0, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    movw $-512, %ax # imm = 0xFE00
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %ymm16, %ymm12 {%k3}
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm7, %xmm1, %xmm1
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 {%k1} = ymm2[u,u,u,u,u,u,u,u,u,5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb %xmm8, %xmm24, %xmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm25[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512BW-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm6, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqu16 %zmm2, %zmm1 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm2 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm4 {%k1}
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u],zero,zero,zero,xmm3[5,12],zero,zero,xmm3[1,8,15]
-; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm12[u,u,u,u,u,u,0,7,14],zero,zero,xmm12[3,10],zero,zero,zero
-; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm3, %xmm5, %xmm3
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [16,17,18,19,20,21,22,23,24,25,26,43,44,45,46,47]
-; AVX512BW-ONLY-SLOW-NEXT:    vpermi2w %zmm3, %zmm0, %zmm5
-; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa32 %zmm3, %zmm5 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa32 %zmm2, %zmm4 {%k1}
+; AVX512BW-ONLY-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm11[u,u,u,u,u,u,0,7,14],zero,zero,xmm11[3,10],zero,zero,zero
+; AVX512BW-ONLY-SLOW-NEXT:    vpor %xmm2, %xmm3, %xmm2
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3,4,5,6,7],ymm0[8,9,10],ymm2[11,12,13,14,15]
+; AVX512BW-ONLY-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX512BW-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa32 %zmm0, %zmm1 {%k1}
 ; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rdi
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm22, (%rsi)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm17, (%rdx)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm16, (%rsi)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm12, (%rdx)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm18, (%rcx)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm19, (%r8)
-; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, (%r9)
+; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, (%r9)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, (%rdi)
 ; AVX512BW-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512BW-ONLY-SLOW-NEXT:    vzeroupper
@@ -11305,376 +11301,376 @@ define void @load_i8_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ;
 ; AVX512DQBW-SLOW-LABEL: load_i8_stride7_vf64:
 ; AVX512DQBW-SLOW:       # %bb.0:
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm26
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm22
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm1
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 64(%rdi), %ymm25
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 64(%rdi), %ymm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 96(%rdi), %ymm2
 ; AVX512DQBW-SLOW-NEXT:    movw $-28382, %ax # imm = 0x9122
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm1, %ymm26, %ymm0 {%k1}
-; AVX512DQBW-SLOW-NEXT:    kmovq %k1, %k2
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm5[5,12],zero,zero,xmm5[1,8,15,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k2
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm1, %ymm22, %ymm0 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[5,12],zero,zero,xmm4[1,8,15,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm5, %xmm0, %xmm24
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm4, %xmm0, %xmm16
 ; AVX512DQBW-SLOW-NEXT:    movw $9288, %ax # imm = 0x2448
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k3
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm25, %ymm3, %ymm0 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm5[2],ymm0[3,4],ymm5[5],ymm0[6,7,8,9],ymm5[10],ymm0[11,12],ymm5[13],ymm0[14,15]
+; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k5
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm7, %ymm2, %ymm0 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm4[2],ymm0[3,4],ymm4[5],ymm0[6,7,8,9],ymm4[10],ymm0[11,12],ymm4[13],ymm0[14,15]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,6,13,4,11,2,9,16,23,30,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm0, %ymm24 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm0, %ymm16 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 128(%rdi), %ymm9
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 160(%rdi), %ymm6
 ; AVX512DQBW-SLOW-NEXT:    movw $8772, %ax # imm = 0x2244
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k6
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm8, %ymm7, %ymm0 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u],zero,zero,xmm5[3,10],zero,zero,zero,xmm5[6,13,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k3
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm9, %ymm6, %ymm0 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero,xmm0[u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm5, %xmm0, %xmm0
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm0, %xmm0
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm11
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 192(%rdi), %xmm8
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <u,u,u,u,u,u,4,11,u,u,u,u,u,u,u,u>
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm18, %xmm11, %xmm5
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm13
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm13[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm5[7]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 240(%rdi), %xmm5
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,zero,xmm5[5,12,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 224(%rdi), %xmm6
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm6[0,7,14],zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm9, %xmm10, %xmm9
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm9, %zmm0, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm18, %xmm8, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 208(%rdi), %xmm10
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm10[u,u,u,u,u,u,2,9,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm4[7]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 240(%rdi), %xmm24
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,zero,xmm24[5,12,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 224(%rdi), %xmm25
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm25[0,7,14],zero,zero,xmm25[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm11, %xmm12, %xmm11
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm11, %zmm0, %zmm0
 ; AVX512DQBW-SLOW-NEXT:    movabsq $137438429184, %rax # imm = 0x1FFFF80000
-; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k5
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm24 {%k5}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm10
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm9
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm10, %ymm9, %ymm0 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm0[u,u,u,u,u,3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u]
+; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k1
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm16 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 288(%rdi), %ymm14
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 256(%rdi), %ymm13
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm14, %ymm13, %ymm0 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm0[u,u,u,u,u,3,10],zero,zero,zero,xmm0[6,13],zero,zero,xmm0[u,u]
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u],zero,zero,xmm0[1,8,15],zero,zero,xmm0[4,11,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm12, %xmm0, %xmm20
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 352(%rdi), %ymm14
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm15
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm14, %ymm15, %ymm0 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm0[2,3,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm12[2],ymm0[3,4,5],ymm12[6],ymm0[7,8,9],ymm12[10],ymm0[11,12,13],ymm12[14],ymm0[15]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm11, %xmm0, %xmm20
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 352(%rdi), %ymm17
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 320(%rdi), %ymm0
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm17, %ymm0, %ymm11 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm11[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm12[2],ymm11[3,4,5],ymm12[6],ymm11[7,8,9],ymm12[10],ymm11[11,12,13],ymm12[14],ymm11[15]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,2,9,16,23,30,21,28,19,26,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    movw $3968, %ax # imm = 0xF80
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k7
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm0, %ymm20 {%k7}
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 416(%rdi), %ymm16
-; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm12
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm11, %ymm20 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 416(%rdi), %ymm15
+; AVX512DQBW-SLOW-NEXT:    vmovdqa 384(%rdi), %ymm11
 ; AVX512DQBW-SLOW-NEXT:    movw $4644, %ax # imm = 0x1224
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k4
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm16, %ymm12, %ymm0 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm0, %xmm17
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = xmm17[u,u,u,u,u,u,u],zero,zero,zero,xmm17[6,13],zero,zero,xmm17[2,9]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm17, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm0, %ymm0, %ymm21
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm15, %ymm11, %ymm12 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm12, %xmm19
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm19[u,u,u,u,u,u,u],zero,zero,zero,xmm19[6,13],zero,zero,xmm19[2,9]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[u,u,u,u,u,u,u,1,8,15],zero,zero,xmm12[4,11],zero,zero
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm19, %xmm12, %xmm12
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm12, %ymm0, %ymm21
 ; AVX512DQBW-SLOW-NEXT:    movl $-8388608, %eax # imm = 0xFF800000
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm1, %ymm26, %ymm0 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm0, %xmm17
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm17 = zero,zero,zero,xmm17[6,13],zero,zero,xmm17[2,9,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm17, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm3, %ymm25, %ymm2 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm1, %ymm22, %ymm12 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm12, %xmm19
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = zero,zero,zero,xmm19[6,13],zero,zero,xmm19[2,9,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm12 = xmm12[1,8,15],zero,zero,xmm12[4,11],zero,zero,xmm12[u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm19, %xmm12, %xmm19
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm2, %ymm7, %ymm12 {%k2}
 ; AVX512DQBW-SLOW-NEXT:    kmovw %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 2-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4,5],ymm4[6],ymm2[7,8,9],ymm4[10],ymm2[11,12,13],ymm4[14],ymm2[15]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm17 = ymm2[u,u,u,u,u,u,u,u,u,0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm12[0,1],ymm3[2],ymm12[3,4,5],ymm3[6],ymm12[7,8,9],ymm3[10],ymm12[11,12,13],ymm3[14],ymm12[15]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm3[u,u,u,u,u,u,u,u,u,0,7,14,5,12,3,10,17,24,31,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    movl $511, %edi # imm = 0x1FF
-; AVX512DQBW-SLOW-NEXT:    kmovd %edi, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm17 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm8, %ymm7, %ymm0 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero,xmm0[u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm13[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm11[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm2[7]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm5[6,13,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[1,8,15],zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm2, %xmm4, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm2, %zmm0, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm17 {%k5}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm3, %ymm25, %ymm0 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7,8,9,10],ymm2[11],ymm0[12,13],ymm2[14],ymm0[15]
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm1, %ymm26, %ymm2 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[2,9],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm2, %xmm2
-; AVX512DQBW-SLOW-NEXT:    movl $261632, %edi # imm = 0x3FE00
-; AVX512DQBW-SLOW-NEXT:    kmovd %edi, %k5
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 {%k5} = ymm0[u,u,u,u,u,u,u,u,u,1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm7, %ymm8, %ymm0 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,zero,xmm4[5,12],zero,zero,xmm4[1,8,15,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm18, %xmm13, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm18 = xmm11[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm18[0],xmm4[0],xmm18[1],xmm4[1],xmm18[2],xmm4[2],xmm18[3],xmm4[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm4[7]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[2,9],zero,zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm18 = zero,zero,xmm5[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm4, %xmm18, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm0, %zmm18
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm2, %zmm18 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm3, %ymm25, %ymm0 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5,6],ymm2[7,8],ymm0[9,10],ymm2[11],ymm0[12,13,14],ymm2[15]
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm1, %ymm26, %ymm2 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[3,10],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm2, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 {%k5} = ymm0[u,u,u,u,u,u,u,u,u,2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm7, %ymm8, %ymm0 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,zero,xmm4[6,13],zero,zero,xmm4[2,9,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm13[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm13[5,12]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm11[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
+; AVX512DQBW-SLOW-NEXT:    kmovd %edi, %k6
+; AVX512DQBW-SLOW-NEXT:    kmovd %k6, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm19, %ymm12 {%k6}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm9, %ymm6, %ymm3 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm3[u,u,u,6,13],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u],zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm19, %xmm3, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm10[u,u,u,u,u,u,3,10,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm23 = xmm8[u,u,u,u,u,u,5,12,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm19 = xmm23[0],xmm19[0],xmm23[1],xmm19[1],xmm23[2],xmm19[2],xmm23[3],xmm19[3]
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm19, %ymm0, %ymm4
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm4[7]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm24[6,13,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm25[1,8,15],zero,zero,xmm25[u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vporq %xmm4, %xmm19, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm3, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm3, %zmm12 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm2, %ymm7, %ymm3 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8,9,10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15]
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm1, %ymm22, %ymm4 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm4[2,9],zero,zero,zero,xmm4[5,12],zero,zero,xmm4[u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm19, %xmm4, %xmm4
+; AVX512DQBW-SLOW-NEXT:    movl $261632, %edi # imm = 0x3FE00
+; AVX512DQBW-SLOW-NEXT:    kmovd %edi, %k6
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 {%k6} = ymm3[u,u,u,u,u,u,u,u,u,1,8,15,6,13,4,11,18,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm6, %ymm9, %ymm3 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm3, %xmm19
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm19[u,u],zero,zero,zero,xmm19[5,12],zero,zero,xmm19[1,8,15,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,0,7,14],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm19, %xmm3, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm18, %xmm10, %xmm18
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm8[u,u,u,u,u,u,6,13,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm18 = xmm19[0],xmm18[0],xmm19[1],xmm18[1],xmm19[2],xmm18[2],xmm19[3],xmm18[3]
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm18, %ymm0, %ymm5
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4,5,6],ymm5[7]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm25[2,9],zero,zero,zero,xmm25[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm18 = zero,zero,xmm24[0,7,14,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm5, %xmm18, %xmm5
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm5, %zmm3, %zmm18
+; AVX512DQBW-SLOW-NEXT:    kmovd {{[-0-9]+}}(%r{{[sb]}}p), %k2 # 4-byte Reload
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm4, %zmm18 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm2, %ymm7, %ymm3 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1,2],ymm4[3],ymm3[4,5,6],ymm4[7,8],ymm3[9,10],ymm4[11],ymm3[12,13,14],ymm4[15]
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm1, %ymm22, %ymm4 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm4[3,10],zero,zero,zero,xmm4[6,13],zero,zero,xmm4[u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = zero,zero,xmm4[1,8,15],zero,zero,xmm4[4,11,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 {%k6} = ymm3[u,u,u,u,u,u,u,u,u,2,9,0,7,14,5,12,19,26,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm6, %ymm9, %ymm3 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm5
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u],zero,zero,zero,xmm5[6,13],zero,zero,xmm5[2,9,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,1,8,15],zero,zero,xmm3[4,11],zero,zero,xmm3[u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm5, %xmm3, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm10[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm10[5,12]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = xmm8[u,u,u,u,u,u,u,u,u,u,u,0,7,14],zero,zero
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm5, %xmm19, %xmm5
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm5
 ; AVX512DQBW-SLOW-NEXT:    movl $-134217728, %edi # imm = 0xF8000000
-; AVX512DQBW-SLOW-NEXT:    kmovd %edi, %k2
-; AVX512DQBW-SLOW-NEXT:    kmovd %k2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm4, %ymm0 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm6[3,10],zero,zero,zero,xmm6[u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = zero,zero,xmm5[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm4, %xmm19, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm4, %zmm0, %zmm19
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm2, %zmm19 {%k1}
+; AVX512DQBW-SLOW-NEXT:    kmovd %edi, %k1
+; AVX512DQBW-SLOW-NEXT:    kmovd %k1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm5, %ymm3 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm25[3,10],zero,zero,zero,xmm25[u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm19 = zero,zero,xmm24[1,8,15,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm5, %xmm19, %xmm5
+; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $2, %xmm5, %zmm3, %zmm19
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm4, %zmm19 {%k2}
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k2
 ; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm21, %ymm20 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm20, %zmm0, %zmm0
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm20, %zmm0, %zmm3
 ; AVX512DQBW-SLOW-NEXT:    movabsq $-137438953472, %rax # imm = 0xFFFFFFE000000000
 ; AVX512DQBW-SLOW-NEXT:    kmovq %rax, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm24 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm9, %ymm10, %ymm0 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,xmm2[2,9],zero,zero,zero,xmm2[5,12,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,4,11],zero,zero,xmm0[0,7,14],zero,zero,xmm0[u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm14, %ymm15, %ymm2 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1,2],ymm4[3],ymm2[4,5],ymm4[6],ymm2[7,8,9,10],ymm4[11],ymm2[12,13],ymm4[14],ymm2[15]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm2, %ymm0 {%k7}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm16, %ymm12, %ymm2 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm2[5,12],zero,zero
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[0,7,14],zero,zero,xmm2[3,10]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm2, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm2, %ymm0 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm17 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm9, %ymm10, %ymm0 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[6,13,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero,xmm0[u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm15, %ymm14, %ymm2 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2],ymm4[3],ymm2[4,5,6],ymm4[7,8],ymm2[9,10],ymm4[11],ymm2[12,13,14],ymm4[15]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm2, %ymm0 {%k7}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm16, %ymm12, %ymm2 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm2[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm2[6,13],zero,zero
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm2, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm2, %ymm0 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm18 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm9, %ymm10, %ymm0 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero,xmm0[u,u]
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm15, %ymm14, %ymm2 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1,2,3],ymm4[4],ymm2[5,6],ymm4[7,8],ymm2[9,10,11],ymm4[12],ymm2[13,14],ymm4[15]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm2, %ymm0 {%k7}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm12, %ymm16, %ymm2 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u],zero,zero,xmm4[2,9],zero,zero,zero,xmm4[5,12]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u,4,11],zero,zero,xmm2[0,7,14],zero,zero
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm2, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm2, %ymm0 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm0, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm0, %zmm19 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm3, %zmm16 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm13, %ymm14, %ymm3 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u],zero,zero,xmm4[2,9],zero,zero,zero,xmm4[5,12,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,4,11],zero,zero,xmm3[0,7,14],zero,zero,xmm3[u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm17, %ymm0, %ymm4 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1,2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7,8,9,10],ymm5[11],ymm4[12,13],ymm5[14],ymm4[15]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,3,10,17,24,31,22,29,20,27,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm4, %ymm3 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm15, %ymm11, %ymm4 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm4[u,u,u,u,u,u,u,2,9],zero,zero,zero,xmm4[5,12],zero,zero
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u],zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm4, %ymm3 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm3, %zmm12 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm13, %ymm14, %ymm3 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,5,12],zero,zero,xmm3[1,8,15],zero,zero,xmm3[u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm0, %ymm17, %ymm4 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1,2],ymm5[3],ymm4[4,5,6],ymm5[7,8],ymm4[9,10],ymm5[11],ymm4[12,13,14],ymm5[15]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,4,11,18,25,16,23,30,21,28,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm4, %ymm3 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm15, %ymm11, %ymm4 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm4[u,u,u,u,u,u,u,3,10],zero,zero,zero,xmm4[6,13],zero,zero
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u],zero,zero,xmm4[1,8,15],zero,zero,xmm4[4,11]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm4, %ymm3 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm3, %zmm18 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm13, %ymm14, %ymm3 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,6,13],zero,zero,xmm3[2,9],zero,zero,zero,xmm3[u,u]
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u],zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm0, %ymm17, %ymm4 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0],ymm4[1,2,3],ymm5[4],ymm4[5,6],ymm5[7,8],ymm4[9,10,11],ymm5[12],ymm4[13,14],ymm5[15]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,5,12,19,26,17,24,31,22,29,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm4, %ymm3 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm11, %ymm15, %ymm4 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm5
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u],zero,zero,xmm5[2,9],zero,zero,zero,xmm5[5,12]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u,4,11],zero,zero,xmm4[0,7,14],zero,zero
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm5, %xmm4, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm4, %ymm3 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %zmm3, %zmm19 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 2-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm10, %ymm9, %ymm0 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,0,7,14],zero,zero,xmm0[3,10],zero,zero,zero,xmm0[u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm2, %xmm0, %xmm20
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm15, %ymm14, %ymm0 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6,7,8],ymm2[9],ymm0[10,11],ymm2[12],ymm0[13,14,15]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm0, %ymm20 {%k7}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm10, %ymm9, %ymm0 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[2,9,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,1,8,15],zero,zero,xmm0[4,11],zero,zero,xmm0[u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm14, %ymm15, %ymm2 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm14, %ymm13, %ymm3 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u],zero,zero,zero,xmm4[5,12],zero,zero,xmm4[1,8,15,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,0,7,14],zero,zero,xmm3[3,10],zero,zero,zero,xmm3[u,u]
+; AVX512DQBW-SLOW-NEXT:    vporq %xmm4, %xmm3, %xmm20
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm0, %ymm17, %ymm3 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6,7,8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14,15]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,6,13,20,27,18,25,16,23,30,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm3, %ymm20 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm14, %ymm13, %ymm3 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u],zero,zero,zero,xmm4[6,13],zero,zero,xmm4[2,9,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,1,8,15],zero,zero,xmm3[4,11],zero,zero,xmm3[u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm17, %ymm0, %ymm4 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    kmovq %k1, %k7
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3,4],ymm4[5],ymm2[6,7,8],ymm4[9],ymm2[10,11,12],ymm4[13],ymm2[14,15]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm21 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3,4],ymm5[5],ymm4[6,7,8],ymm5[9],ymm4[10,11,12],ymm5[13],ymm4[14,15]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm21 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,0,7,14,21,28,19,26,17,24,31,u,u,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    movl $8176, %eax # imm = 0x1FF0
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm21 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm12, %ymm16, %ymm0 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u,u],zero,zero,xmm2[3,10],zero,zero,zero,xmm2[6,13]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u,5,12],zero,zero,xmm0[1,8,15],zero,zero
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm20 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm12, %ymm16, %ymm0 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm0[u,u,u,u,u,u,u,6,13],zero,zero,xmm0[2,9],zero,zero,zero
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,u,u,u],zero,zero,xmm0[4,11],zero,zero,xmm0[0,7,14]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm2, %xmm0, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm0, %ymm21 {%k2}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm3, %ymm25, %ymm0 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm26, %ymm1, %ymm23 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm26, %ymm1, %ymm22 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm7, %ymm8, %ymm2 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm26, %ymm1 {%k3}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm7, %ymm8, %ymm4 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm26 = xmm4[u,u,2,9],zero,zero,zero,xmm4[5,12],zero,zero,xmm4[u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm26, %xmm4, %xmm4
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm26 = xmm13[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm13[6,13]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm27 = xmm11[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm26, %xmm27, %xmm26
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm26, %ymm0, %ymm26
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm3, %ymm21 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm11, %ymm15, %ymm3 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u,u,u],zero,zero,xmm4[3,10],zero,zero,zero,xmm4[6,13]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u,5,12],zero,zero,xmm3[1,8,15],zero,zero
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm3, %ymm20 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm11, %ymm15, %ymm3 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm3[u,u,u,u,u,u,u,6,13],zero,zero,xmm3[2,9],zero,zero,zero
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,u,u],zero,zero,xmm3[4,11],zero,zero,xmm3[0,7,14]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm3, %ymm21 {%k2}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm17, %ymm0 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm0[2,3,0,1]
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7,8,9],ymm3[10],ymm0[11,12],ymm3[13],ymm0[14,15]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm14, %ymm13 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm13[u,u,u,u,2,9],zero,zero,zero,xmm13[5,12],zero,zero,xmm13[u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u],zero,zero,xmm4[0,7,14],zero,zero,xmm4[3,10,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm0, %xmm4, %xmm0
+; AVX512DQBW-SLOW-NEXT:    movl $4186112, %eax # imm = 0x3FE000
+; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 {%k1} = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,1,8,15,22,29,20,27,18,25,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm15, %ymm11 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm2, %ymm7, %ymm14 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm6, %ymm9, %ymm3 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm7, %ymm2, %ymm13 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm22, %ymm1, %ymm4 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm7, %ymm2 {%k3}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm22, %ymm1, %ymm5 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm6, %ymm9, %ymm7 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm22, %ymm1 {%k5}
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm9, %ymm6 {%k4}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm3[u,u,2,9],zero,zero,zero,xmm3[5,12],zero,zero,xmm3[u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u],zero,zero,xmm3[0,7,14],zero,zero,xmm3[3,10,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm3, %xmm9, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm10[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm10[6,13]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm8[u,u,u,u,u,u,u,u,u,u,u,1,8,15],zero,zero
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm9, %xmm15, %xmm9
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
 ; AVX512DQBW-SLOW-NEXT:    kmovd {{[-0-9]+}}(%r{{[sb]}}p), %k1 # 4-byte Reload
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm26, %ymm4 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm26 = xmm2[u,u,3,10],zero,zero,zero,xmm2[6,13],zero,zero,xmm2[u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u],zero,zero,xmm2[1,8,15],zero,zero,xmm2[4,11,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm26, %xmm2, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm26 = xmm11[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm27 = xmm13[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm13[0,7,14]
-; AVX512DQBW-SLOW-NEXT:    vporq %xmm26, %xmm27, %xmm26
-; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQBW-SLOW-NEXT:    vinserti32x4 $1, %xmm26, %ymm0, %ymm26
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm26, %ymm2 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm8, %ymm7 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm8
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u],zero,zero,xmm8[2,9],zero,zero,zero,xmm8[5,12,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u,4,11],zero,zero,xmm7[0,7,14],zero,zero,xmm7[u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm7, %xmm8, %xmm7
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm9, %ymm3 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm7[u,u,3,10],zero,zero,zero,xmm7[6,13],zero,zero,xmm7[u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm7, %xmm7
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[u,u],zero,zero,xmm7[1,8,15],zero,zero,xmm7[4,11,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm7, %xmm9, %xmm7
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm7, %ymm0, %ymm7
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm11[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm13[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm13[1,8,15]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm8, %xmm11, %xmm8
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm8[u,u,u,u,u,u,u,u,u,u,u,2,9],zero,zero,zero
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm15 = xmm10[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm10[0,7,14]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm9, %xmm15, %xmm9
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm9, %ymm7 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm6, %xmm9
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[u,u],zero,zero,xmm9[2,9],zero,zero,zero,xmm9[5,12,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,u,4,11],zero,zero,xmm6[0,7,14],zero,zero,xmm6[u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm6, %xmm9, %xmm6
+; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,u,u,u,u,u,u,u,u,u,u,3,10],zero,zero,zero
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm10[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm10[1,8,15]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm8, %xmm9, %xmm8
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm8, %ymm7 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm8
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm8[0],ymm0[1,2,3],ymm8[4],ymm0[5,6],ymm8[7,8],ymm0[9,10,11],ymm8[12],ymm0[13,14],ymm8[15]
-; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm23, %xmm8
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm8 = zero,zero,xmm8[2,9],zero,zero,zero,xmm8[5,12,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm23[4,11],zero,zero,xmm23[0,7,14],zero,zero,xmm23[u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm8, %xmm11, %xmm8
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 {%k5} = ymm0[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm0, %xmm6, %xmm11
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm5[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm11[0],xmm13[0],xmm11[1],xmm13[1],xmm11[2],xmm13[2],xmm11[3],xmm13[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm4, %zmm4
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm4, %zmm8 {%k5}
-; AVX512DQBW-SLOW-NEXT:    vpblendmw %ymm25, %ymm3, %ymm4 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm11
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm11[1],ymm4[2,3],ymm11[4],ymm4[5,6,7,8],ymm11[9],ymm4[10,11],ymm11[12],ymm4[13,14,15]
-; AVX512DQBW-SLOW-NEXT:    vextracti32x4 $1, %ymm22, %xmm11
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = zero,zero,xmm11[3,10],zero,zero,zero,xmm11[6,13,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm22[5,12],zero,zero,xmm22[1,8,15],zero,zero,xmm22[u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm11, %xmm13, %xmm11
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 {%k5} = ymm4[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm5[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm13 = xmm6[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm13[0],xmm4[0],xmm13[1],xmm4[1],xmm13[2],xmm4[2],xmm13[3],xmm4[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm2, %zmm11 {%k5}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm25, %ymm3 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm3, %xmm2
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3,4],ymm2[5],ymm3[6,7,8],ymm2[9],ymm3[10,11,12],ymm2[13],ymm3[14,15]
+; AVX512DQBW-SLOW-NEXT:    vmovdqu8 %ymm8, %ymm6 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm14, %xmm8
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm14[1,2,3],ymm8[4],ymm14[5,6],ymm8[7,8],ymm14[9,10,11],ymm8[12],ymm14[13,14],ymm8[15]
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm5, %xmm9
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,xmm9[2,9],zero,zero,zero,xmm9[5,12,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm5 = xmm5[4,11],zero,zero,xmm5[0,7,14],zero,zero,xmm5[u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm5, %xmm9, %xmm5
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 {%k6} = ymm8[u,u,u,u,u,u,u,u,u,3,10,1,8,15,6,13,20,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <4,11,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm8, %xmm25, %xmm9
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm10 = xmm24[2,9,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm3, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm3, %zmm5 {%k6}
+; AVX512DQBW-SLOW-NEXT:    movw $-512, %ax # imm = 0xFE00
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm13, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm13[0],ymm3[1],ymm13[2,3],ymm3[4],ymm13[5,6,7,8],ymm3[9],ymm13[10,11],ymm3[12],ymm13[13,14,15]
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm4, %xmm9
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = zero,zero,xmm9[3,10],zero,zero,zero,xmm9[6,13,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[5,12],zero,zero,xmm4[1,8,15],zero,zero,xmm4[u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpor %xmm4, %xmm9, %xmm4
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 {%k6} = ymm3[u,u,u,u,u,u,u,u,u,4,11,2,9,0,7,14,21,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm24[3,10,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm9 = xmm25[5,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm9[0],xmm3[0],xmm9[1],xmm3[1],xmm9[2],xmm3[2],xmm9[3],xmm3[3]
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm7, %zmm3
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm3, %zmm4 {%k6}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3,4],ymm3[5],ymm2[6,7,8],ymm3[9],ymm2[10,11,12],ymm3[13],ymm2[14,15]
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm1[6,13],zero,zero,xmm1[2,9],zero,zero,zero,xmm1[u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm1, %xmm1
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,xmm1[4,11],zero,zero,xmm1[0,7,14,u,u,u,u,u,u,u]
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm3, %xmm1, %xmm1
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 {%k5} = ymm2[u,u,u,u,u,u,u,u,u,5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm6[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm7, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k5}
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm14, %ymm15 {%k4}
-; AVX512DQBW-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm15[2,3,0,1]
-; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm15[0,1],ymm0[2],ymm15[3,4],ymm0[5],ymm15[6,7,8,9],ymm0[10],ymm15[11,12],ymm0[13],ymm15[14,15]
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm10, %ymm9 {%k6}
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm9[u,u,u,u,2,9],zero,zero,zero,xmm9[5,12],zero,zero,xmm9[u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm9, %xmm3
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u],zero,zero,xmm3[0,7,14],zero,zero,xmm3[3,10,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512DQBW-SLOW-NEXT:    movl $4186112, %eax # imm = 0x3FE000
-; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 {%k1} = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,1,8,15,22,29,20,27,18,25,u,u,u,u,u,u,u,u,u,u]
-; AVX512DQBW-SLOW-NEXT:    movw $-512, %ax # imm = 0xFE00
-; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %ymm16, %ymm12 {%k7}
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 {%k6} = ymm2[u,u,u,u,u,u,u,u,u,5,12,3,10,1,8,15,22,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpshufb %xmm8, %xmm24, %xmm2
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm25[6,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVX512DQBW-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm6, %zmm2
+; AVX512DQBW-SLOW-NEXT:    vmovdqu16 %zmm2, %zmm1 {%k6}
 ; AVX512DQBW-SLOW-NEXT:    kmovd %eax, %k1
-; AVX512DQBW-SLOW-NEXT:    vinserti32x8 $1, %ymm20, %zmm0, %zmm8 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti32x8 $1, %ymm21, %zmm0, %zmm11 {%k1}
-; AVX512DQBW-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm12, %xmm2
+; AVX512DQBW-SLOW-NEXT:    vinserti32x8 $1, %ymm20, %zmm0, %zmm5 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vinserti32x8 $1, %ymm21, %zmm0, %zmm4 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm2
 ; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,u,u],zero,zero,zero,xmm2[5,12],zero,zero,xmm2[1,8,15]
-; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm12[u,u,u,u,u,u,0,7,14],zero,zero,xmm12[3,10],zero,zero,zero
+; AVX512DQBW-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm11[u,u,u,u,u,u,0,7,14],zero,zero,xmm11[3,10],zero,zero,zero
 ; AVX512DQBW-SLOW-NEXT:    vpor %xmm2, %xmm3, %xmm2
 ; AVX512DQBW-SLOW-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQBW-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,17,18,19,20,21,22,23,24,25,26,43,44,45,46,47]
-; AVX512DQBW-SLOW-NEXT:    vpermi2w %zmm2, %zmm0, %zmm3
-; AVX512DQBW-SLOW-NEXT:    vinserti32x8 $1, %ymm3, %zmm0, %zmm1 {%k1}
+; AVX512DQBW-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3,4,5,6,7],ymm0[8,9,10],ymm2[11,12,13,14,15]
+; AVX512DQBW-SLOW-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQBW-SLOW-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
 ; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQBW-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rdi
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm24, (%rsi)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm17, (%rdx)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm16, (%rsi)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm12, (%rdx)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm18, (%rcx)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm19, (%r8)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm8, (%r9)
-; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm11, (%rdi)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm5, (%r9)
+; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm4, (%rdi)
 ; AVX512DQBW-SLOW-NEXT:    vmovdqa64 %zmm1, (%rax)
 ; AVX512DQBW-SLOW-NEXT:    vzeroupper
 ; AVX512DQBW-SLOW-NEXT:    retq

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
index 264655bb0ce8e..71505f5912548 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-6.ll
@@ -3541,235 +3541,230 @@ define void @store_i16_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST:       # %bb.0:
 ; AVX512F-ONLY-FAST-NEXT:    pushq %rax
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm9[0],ymm1[0],ymm9[1],ymm1[1],ymm9[2],ymm1[2],ymm9[3],ymm1[3],ymm9[8],ymm1[8],ymm9[9],ymm1[9],ymm9[10],ymm1[10],ymm9[11],ymm1[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm12[0],ymm1[0],ymm12[1],ymm1[1],ymm12[2],ymm1[2],ymm12[3],ymm1[3],ymm12[8],ymm1[8],ymm12[9],ymm1[9],ymm12[10],ymm1[10],ymm12[11],ymm1[11]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = <1,2,1,2,u,u,3,3,13,12,10,10,13,12,14,14>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm1, %zmm23, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = <1,2,1,2,u,u,3,3,13,12,10,10,13,12,14,14>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm0, %zmm20, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm15[0],ymm4[0],ymm15[1],ymm4[1],ymm15[2],ymm4[2],ymm15[3],ymm4[3],ymm15[8],ymm4[8],ymm15[9],ymm4[9],ymm15[10],ymm4[10],ymm15[11],ymm4[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,1,1,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm5[0],ymm10[0],ymm5[1],ymm10[1],ymm5[2],ymm10[2],ymm5[3],ymm10[3],ymm5[8],ymm10[8],ymm5[9],ymm10[9],ymm5[10],ymm10[10],ymm5[11],ymm10[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    movw $18724, %ax # imm = 0x4924
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm21 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [8,9,20,11,12,21,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm12[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm1, %zmm19, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm23 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [8,9,20,11,12,21,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm9[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm0, %zmm22, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm14[0],ymm1[0],ymm14[1],ymm1[1],ymm14[2],ymm1[2],ymm14[3],ymm1[3],ymm14[8],ymm1[8],ymm14[9],ymm1[9],ymm14[10],ymm1[10],ymm14[11],ymm1[11]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm6, %xmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm16
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm0, %xmm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [0,9,2,3,8,5,6,11]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm3, %ymm20, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm0, %xmm29
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm3[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm15[0],ymm1[0],ymm15[1],ymm1[1],ymm15[2],ymm1[2],ymm15[3],ymm1[3],ymm15[8],ymm1[8],ymm15[9],ymm1[9],ymm15[10],ymm1[10],ymm15[11],ymm1[11]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm6, %xmm18
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm17
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm3, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [0,9,2,3,8,5,6,11]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm16, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm28
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm7[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm5[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm10 = xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm10[1,1,1,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm11[0],ymm10[0],ymm11[1],ymm10[1],ymm11[2],ymm10[2],ymm11[3],ymm10[3],ymm11[8],ymm10[8],ymm11[9],ymm10[9],ymm11[10],ymm10[10],ymm11[11],ymm10[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm6, %zmm23, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm22, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm24 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm3[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm6, %zmm24, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm6, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm2, %ymm20, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm14[4],ymm1[4],ymm14[5],ymm1[5],ymm14[6],ymm1[6],ymm14[7],ymm1[7],ymm14[12],ymm1[12],ymm14[13],ymm1[13],ymm14[14],ymm1[14],ymm14[15],ymm1[15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm4[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm0[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm0[1,1,1,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm0[0],ymm11[0],ymm0[1],ymm11[1],ymm0[2],ymm11[2],ymm0[3],ymm11[3],ymm0[8],ymm11[8],ymm0[9],ymm11[9],ymm0[10],ymm11[10],ymm0[11],ymm11[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm2, %zmm20, %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm19, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm2, %zmm25 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm2[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm8, %zmm25, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm8, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm14, %ymm16, %ymm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm14, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm3[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [5,6,5,6,5,6,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm15[4],ymm1[4],ymm15[5],ymm1[5],ymm15[6],ymm1[6],ymm15[7],ymm1[7],ymm15[12],ymm1[12],ymm15[13],ymm1[13],ymm15[14],ymm1[14],ymm15[15],ymm1[15]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm3, %ymm24, %ymm16
 ; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm14 = ymm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm14[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm14 = ymm14[0],ymm1[0],ymm14[1],ymm1[1],ymm14[2],ymm1[2],ymm14[3],ymm1[3],ymm14[8],ymm1[8],ymm14[9],ymm1[9],ymm14[10],ymm1[10],ymm14[11],ymm1[11]
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm15 = ymm15[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm15[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm15[0],ymm1[0],ymm15[1],ymm1[1],ymm15[2],ymm1[2],ymm15[3],ymm1[3],ymm15[8],ymm1[8],ymm15[9],ymm1[9],ymm15[10],ymm1[10],ymm15[11],ymm1[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm1[2,2,2,2]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm10, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm13[0],ymm0[0],ymm13[1],ymm0[1],ymm13[2],ymm0[2],ymm13[3],ymm0[3],ymm13[8],ymm0[8],ymm13[9],ymm0[9],ymm13[10],ymm0[10],ymm13[11],ymm0[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [5,6,5,6,5,6,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm2, %ymm13, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm10 = ymm11[4],ymm10[4],ymm11[5],ymm10[5],ymm11[6],ymm10[6],ymm11[7],ymm10[7],ymm11[12],ymm10[12],ymm11[13],ymm10[13],ymm11[14],ymm10[14],ymm11[15],ymm10[15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[3,3,3,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm14, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm2, %zmm14 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vextracti64x4 $1, %zmm14, %ymm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [0,13,2,3,12,5,6,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm3, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm23, %ymm20
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm3[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [12,1,2,13,4,5,14,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm3, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm9[4],ymm2[4],ymm9[5],ymm2[5],ymm9[6],ymm2[6],ymm9[7],ymm2[7],ymm9[12],ymm2[12],ymm9[13],ymm2[13],ymm9[14],ymm2[14],ymm9[15],ymm2[15]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm0, %ymm13, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm26, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm0, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm11[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm11 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm9 = ymm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm9[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm9 = ymm9[0],ymm11[0],ymm9[1],ymm11[1],ymm9[2],ymm11[2],ymm9[3],ymm11[3],ymm9[8],ymm11[8],ymm9[9],ymm11[9],ymm9[10],ymm11[10],ymm9[11],ymm11[11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm15, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm11[0],ymm1[1],ymm11[1],ymm1[2],ymm11[2],ymm1[3],ymm11[3],ymm1[8],ymm11[8],ymm1[9],ymm11[9],ymm1[10],ymm11[10],ymm1[11],ymm11[11]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm15[4],ymm4[4],ymm15[5],ymm4[5],ymm15[6],ymm4[6],ymm15[7],ymm4[7],ymm15[12],ymm4[12],ymm15[13],ymm4[13],ymm15[14],ymm4[14],ymm15[15],ymm4[15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm13[0],ymm3[0],ymm13[1],ymm3[1],ymm13[2],ymm3[2],ymm13[3],ymm3[3],ymm13[8],ymm3[8],ymm13[9],ymm3[9],ymm13[10],ymm3[10],ymm13[11],ymm3[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm11[4],ymm0[5],ymm11[5],ymm0[6],ymm11[6],ymm0[7],ymm11[7],ymm0[12],ymm11[12],ymm0[13],ymm11[13],ymm0[14],ymm11[14],ymm0[15],ymm11[15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,3,3,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm15, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm11, %zmm15 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [8,21,10,11,20,13,14,23]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm2, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm3, %zmm16, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm31 = [12,1,2,13,4,5,14,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm2, %ymm31, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm12[4],ymm3[4],ymm12[5],ymm3[5],ymm12[6],ymm3[6],ymm12[7],ymm3[7],ymm12[12],ymm3[12],ymm12[13],ymm3[13],ymm12[14],ymm3[14],ymm12[15],ymm3[15]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm2, %ymm24, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm11[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm15[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm11[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm24 = ymm13[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm13 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm12 = ymm12[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm12[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm12 = ymm12[0],ymm13[0],ymm12[1],ymm13[1],ymm12[2],ymm13[2],ymm12[3],ymm13[3],ymm12[8],ymm13[8],ymm12[9],ymm13[9],ymm12[10],ymm13[10],ymm12[11],ymm13[11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm10, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm13[0],ymm1[1],ymm13[1],ymm1[2],ymm13[2],ymm1[3],ymm13[3],ymm1[8],ymm13[8],ymm1[9],ymm13[9],ymm1[10],ymm13[10],ymm1[11],ymm13[11]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm5[4],ymm10[4],ymm5[5],ymm10[5],ymm5[6],ymm10[6],ymm5[7],ymm10[7],ymm5[12],ymm10[12],ymm5[13],ymm10[13],ymm5[14],ymm10[14],ymm5[15],ymm10[15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm11, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm10[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,2]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[3,3,3,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm9, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm9, %zmm4 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vextracti64x4 $1, %zmm4, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm12, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm1, %ymm23, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm2[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm11, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm23 = ymm10[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm10 = ymm12[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm10, %ymm3, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm28 = ymm1[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm7, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm1[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm16, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm17, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm8, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm5, %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm11[4],xmm7[4],xmm11[5],xmm7[5],xmm11[6],xmm7[6],xmm11[7],xmm7[7]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [1,0,2,2,1,0,2,2]
-; AVX512F-ONLY-FAST-NEXT:    # ymm11 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm2, %ymm11, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm3, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm5[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm2, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[3,3,3,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm12, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm2, %zmm5 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm0, %zmm5, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm11, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm29 = ymm1[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm9[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm1, %ymm31, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm4, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm1[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm17, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm7, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm6, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [1,0,2,2,1,0,2,2]
+; AVX512F-ONLY-FAST-NEXT:    # ymm13 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm13, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm2, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm6[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm1, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    movw $9362, %ax # imm = 0x2492
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm2 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [16,9,10,17,12,13,18,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm6[2,1,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm5, %zmm3, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm5 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm0[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm2, %zmm1 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [16,9,10,17,12,13,18,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm8[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm4, %zmm2, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm4 = xmm8[0],zero,xmm8[1],zero,xmm8[2],zero,xmm8[3],zero
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm14[0,0,2,1,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm6, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm7, %ymm11, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm11 = xmm15[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm15 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm15[0],xmm11[0],xmm15[1],xmm11[1],xmm15[2],xmm11[2],xmm15[3],xmm11[3]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm11, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm1, %xmm15
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm10, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm13 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm15 = xmm10[0],xmm1[0],xmm10[1],xmm1[1],xmm10[2],xmm1[2],xmm10[3],xmm1[3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm7, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm15[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm11, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm7, %zmm11 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm27, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm1[2,1,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm7, %zmm11, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm10[0],xmm3[0],xmm10[1],xmm3[1],xmm10[2],xmm3[2],xmm10[3],xmm3[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm7, %ymm13, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm14, %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm14 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm10 = xmm10[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm10, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm3, %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm11, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm11[0],xmm3[0],xmm11[1],xmm3[1],xmm11[2],xmm3[2],xmm11[3],xmm3[3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm7, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm14[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm10, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm7, %zmm10 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm27, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm3[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm7, %zmm10, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm7 = xmm27[0],zero,xmm27[1],zero,xmm27[2],zero,xmm27[3],zero
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm29, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm1[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm13, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm28, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm3[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm12, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm3, %xmm8
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm15 = zmm21[0,1,2,3],zmm18[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm31, %zmm1, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm15, %zmm17, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm15 = zmm24[0,1,2,3],zmm19[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm22, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm15, %zmm17, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm0, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 = zmm14[0,1,2,3],zmm15[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm23, %zmm26, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm14, %zmm17, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm9[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm28, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm17, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,8,3,4,9,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm5, %ymm4, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm12[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm6, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 = zmm23[0,1,2,3],zmm21[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm3, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm14, %zmm18, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 = zmm25[0,1,2,3],zmm22[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm20, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm14, %zmm18, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm14 = zmm15[0,1,2,3],zmm19[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm29, %zmm26, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm14, %zmm15, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm16[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm31, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm15, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,8,3,4,9,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm4, %ymm3, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm9[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm6, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm5, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm7, %ymm4, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm11[0,1,2,3],zmm3[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm13, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm5, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 192(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 128(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, 320(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, 256(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 64(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm5, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm7, %ymm3, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm10[0,1,2,3],zmm2[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm12, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm5, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 192(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 128(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 320(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 256(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 64(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    popq %rax
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
@@ -4042,212 +4037,209 @@ define void @store_i16_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[3,3,3,3]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %ymm5
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm5[4],ymm2[4],ymm5[5],ymm2[5],ymm5[6],ymm2[6],ymm5[7],ymm2[7],ymm5[12],ymm2[12],ymm5[13],ymm2[13],ymm5[14],ymm2[14],ymm5[15],ymm2[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [5,6,5,6,5,6,7,7]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm1, %ymm4, %ymm1
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm2 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm5[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm5[4],ymm3[4],ymm5[5],ymm3[5],ymm5[6],ymm3[6],ymm5[7],ymm3[7],ymm5[12],ymm3[12],ymm5[13],ymm3[13],ymm5[14],ymm3[14],ymm5[15],ymm3[15]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [5,6,5,6,5,6,7,7]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm1, %ymm2, %ymm1
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm5[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm29
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
 ; AVX512DQ-FAST-NEXT:    movw $18724, %ax # imm = 0x4924
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm2, %zmm22 {%k1}
-; AVX512DQ-FAST-NEXT:    vextracti64x4 $1, %zmm22, %ymm20
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %ymm3
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm3, %zmm22 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [8,21,10,11,20,13,14,23]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %ymm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm3, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,13,2,3,12,5,6,15]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm2, %ymm8, %ymm20
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm3[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm31
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [12,1,2,13,4,5,14,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm2, %ymm11, %ymm22
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, %zmm20
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm3, %zmm21, %zmm20
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm4[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm31
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [12,1,2,13,4,5,14,7]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm3, %ymm9, %ymm22
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %ymm5
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm3
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm5, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm5, %ymm4
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm5, %ymm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm17
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm3[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm4[2,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %ymm5
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm3
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm15
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm15, %ymm0
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm3[0],ymm0[1],ymm3[1],ymm0[2],ymm3[2],ymm0[3],ymm3[3],ymm0[8],ymm3[8],ymm0[9],ymm3[9],ymm0[10],ymm3[10],ymm0[11],ymm3[11]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[1],ymm4[1],ymm0[2],ymm4[2],ymm0[3],ymm4[3],ymm0[8],ymm4[8],ymm0[9],ymm4[9],ymm0[10],ymm4[10],ymm0[11],ymm4[11]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm15[4],ymm5[4],ymm15[5],ymm5[5],ymm15[6],ymm5[6],ymm15[7],ymm5[7],ymm15[12],ymm5[12],ymm15[13],ymm5[13],ymm15[14],ymm5[14],ymm15[15],ymm5[15]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm4 = ymm15[4],ymm5[4],ymm15[5],ymm5[5],ymm15[6],ymm5[6],ymm15[7],ymm5[7],ymm15[12],ymm5[12],ymm15[13],ymm5[13],ymm15[14],ymm5[14],ymm15[15],ymm5[15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm16
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[3,3,3,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm23
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[3,3,3,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm23
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %ymm13
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %ymm3
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm3[4],ymm13[4],ymm3[5],ymm13[5],ymm3[6],ymm13[6],ymm3[7],ymm13[7],ymm3[12],ymm13[12],ymm3[13],ymm13[13],ymm3[14],ymm13[14],ymm3[15],ymm13[15]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm0, %ymm4, %ymm0
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm13[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm9 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm9[0],ymm4[0],ymm9[1],ymm4[1],ymm9[2],ymm4[2],ymm9[3],ymm4[3],ymm9[8],ymm4[8],ymm9[9],ymm4[9],ymm9[10],ymm4[10],ymm9[11],ymm4[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm4, %zmm23 {%k1}
-; AVX512DQ-FAST-NEXT:    vextracti64x4 $1, %zmm23, %ymm21
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm9
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm9, %ymm0
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm8, %ymm21
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm9[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm11, %ymm23
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm8
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm8, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %ymm4
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm4[4],ymm13[4],ymm4[5],ymm13[5],ymm4[6],ymm13[6],ymm4[7],ymm13[7],ymm4[12],ymm13[12],ymm4[13],ymm13[13],ymm4[14],ymm13[14],ymm4[15],ymm13[15]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm0, %ymm2, %ymm0
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm2 = ymm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm13[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm10 = ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm10[0],ymm2[0],ymm10[1],ymm2[1],ymm10[2],ymm2[2],ymm10[3],ymm2[3],ymm10[8],ymm2[8],ymm10[9],ymm2[9],ymm10[10],ymm2[10],ymm10[11],ymm2[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm2, %zmm23 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm10
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm10, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm0, %zmm23, %zmm21
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm10[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm9, %ymm23
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm9
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm9, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm18 = ymm0[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm8, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm9, %ymm0
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm0[2,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %xmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %xmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm6
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm3[0],ymm13[0],ymm3[1],ymm13[1],ymm3[2],ymm13[2],ymm3[3],ymm13[3],ymm3[8],ymm13[8],ymm3[9],ymm13[9],ymm3[10],ymm13[10],ymm3[11],ymm13[11]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm25
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm12, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm3
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm0[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm0[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %xmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm3
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm4[0],ymm13[0],ymm4[1],ymm13[1],ymm4[2],ymm13[2],ymm4[3],ymm13[3],ymm4[8],ymm13[8],ymm4[9],ymm13[9],ymm4[10],ymm13[10],ymm4[11],ymm13[11]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm3[4],xmm8[4],xmm3[5],xmm8[5],xmm3[6],xmm8[6],xmm3[7],xmm8[7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm24
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm12, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm13
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm13, %xmm4
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm0[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm0[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %xmm11
 ; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm26 = [1,0,2,2,1,0,2,2]
 ; AVX512DQ-FAST-NEXT:    # ymm26 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %xmm13
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm13[0],xmm1[0],xmm13[1],xmm1[1],xmm13[2],xmm1[2],xmm13[3],xmm1[3]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %xmm2
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3]
 ; AVX512DQ-FAST-NEXT:    vpermd %ymm5, %ymm26, %ymm5
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm10, %zmm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm25, %zmm7
 ; AVX512DQ-FAST-NEXT:    movw $9362, %ax # imm = 0x2492
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k2
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm5, %zmm3 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [16,9,10,17,12,13,18,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %xmm10
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm10[2,1,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm24, %zmm27
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm7
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm5, %xmm0
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm7, %xmm2
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm2, %ymm26, %ymm26
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %xmm2
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm2[0,0,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm5, %zmm7 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = [16,9,10,17,12,13,18,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, %zmm25
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %xmm4
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm4[2,1,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm0, %zmm27, %zmm25
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm5
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm0, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm5, %xmm6
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm6, %ymm26, %ymm26
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %xmm6
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm6[0,0,2,1,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm14, %ymm28
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm11 = xmm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm6, %ymm6
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm11, %zmm0
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm6, %zmm26, %zmm0 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %xmm6
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm6[2,1,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm11, %zmm0, %zmm24
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[1,1,1,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm7
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm15[0],ymm7[0],ymm15[1],ymm7[1],ymm15[2],ymm7[2],ymm15[3],ymm7[3],ymm15[8],ymm7[8],ymm15[9],ymm7[9],ymm15[10],ymm7[10],ymm15[11],ymm7[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm11 = <1,2,1,2,u,u,3,3,13,12,10,10,13,12,14,14>
-; AVX512DQ-FAST-NEXT:    vpermd %zmm25, %zmm11, %zmm15
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm7, %zmm5, %zmm15 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [8,9,20,11,12,21,14,15]
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm9[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm9
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm7, %zmm5, %zmm9
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1],xmm3[2],xmm8[2],xmm3[3],xmm8[3]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm3, %ymm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm3, %zmm26, %zmm1 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %xmm3
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm3[2,1,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm8, %zmm1, %zmm27
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[1,1,1,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm5
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm5 = ymm15[0],ymm5[0],ymm15[1],ymm5[1],ymm15[2],ymm5[2],ymm15[3],ymm5[3],ymm15[8],ymm5[8],ymm15[9],ymm5[9],ymm15[10],ymm5[10],ymm15[11],ymm5[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <1,2,1,2,u,u,3,3,13,12,10,10,13,12,14,14>
+; AVX512DQ-FAST-NEXT:    vpermd %zmm24, %zmm8, %zmm15
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm5, %zmm0, %zmm15 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [8,9,20,11,12,21,14,15]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm10[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm15, %zmm10
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm0, %zmm10
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm14
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm7 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm7 = ymm14[0],mem[0],ymm14[1],mem[1],ymm14[2],mem[2],ymm14[3],mem[3],ymm14[8],mem[8],ymm14[9],mem[9],ymm14[10],mem[10],ymm14[11],mem[11]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm13[4],xmm1[4],xmm13[5],xmm1[5],xmm13[6],xmm1[6],xmm13[7],xmm1[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm6, %xmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [0,9,2,3,8,5,6,11]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm14, %ymm25, %ymm15
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm2, %xmm14
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm14, %ymm5 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm5 = ymm14[0],mem[0],ymm14[1],mem[1],ymm14[2],mem[2],ymm14[3],mem[3],ymm14[8],mem[8],ymm14[9],mem[9],ymm14[10],mem[10],ymm14[11],mem[11]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm3, %xmm14
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [0,9,2,3,8,5,6,11]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm14, %ymm24, %ymm15
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm6, %xmm14
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm14[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; AVX512DQ-FAST-NEXT:    vpermd %zmm1, %zmm11, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %xmm11
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7]
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm11[0,0,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm12, %ymm29
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm11, %xmm13
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm12, %ymm14 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm14 = ymm12[0],mem[0],ymm12[1],mem[1],ymm12[2],mem[2],ymm12[3],mem[3],ymm12[8],mem[8],ymm12[9],mem[9],ymm12[10],mem[10],ymm12[11],mem[11]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm11, %xmm11
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm8[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[1,1,1,1]
+; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX512DQ-FAST-NEXT:    vpermd %zmm2, %zmm8, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %xmm8
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm8[0,0,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm13, %ymm29
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm8, %xmm11
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm13, %ymm14 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm14 = ymm13[0],mem[0],ymm13[1],mem[1],ymm13[2],mem[2],ymm13[3],mem[3],ymm13[8],mem[8],ymm13[9],mem[9],ymm13[10],mem[10],ymm13[11],mem[11]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm8, %xmm8
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm9[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[1,1,1,1]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm14, %zmm4, %zmm1 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm4
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm4[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm4, %zmm1, %zmm5
-; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm4 = xmm10[0],zero,xmm10[1],zero,xmm10[2],zero,xmm10[3],zero
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm10, %xmm7
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm7, %ymm25, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm7
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm7 = ymm7[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm0, %zmm10
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm22[0,1,2,3],zmm10[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm12 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm12, %zmm12
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm14, %zmm12, %zmm2 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm12
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm12[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm12, %zmm2, %zmm0
+; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm12 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm4, %ymm24, %ymm2
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm6, %xmm4
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm5
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm5[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm22[0,1,2,3],zmm20[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm13 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm13, %zmm13
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm10, %zmm14, %zmm12
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm10
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm23[0,1,2,3],zmm10[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm14, %zmm13
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm23[0,1,2,3],zmm21[0,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm18, %zmm16
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm10, %zmm14, %zmm16
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [0,1,8,3,4,9,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm4, %ymm10, %ymm3
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm6, %ymm10, %ymm0
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm27[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm26, %zmm28, %zmm4
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm6, %zmm4
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm24[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm29, %zmm3
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm6, %zmm3
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm15[0,1,2,3],zmm9[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm11, %zmm6
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm14, %zmm16
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [0,1,8,3,4,9,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm12, %ymm6, %ymm7
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm3, %ymm6, %ymm1
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm7[0,1,2,3],zmm25[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm26, %zmm28, %zmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm7, %zmm6
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm27[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm29, %zmm3
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm7, %zmm3
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm15[0,1,2,3],zmm10[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm7
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm8, %zmm6
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm5[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm2, %zmm1
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm8, %zmm7
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm0[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm1
 ; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm8, %zmm1
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 256(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 64(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm7, 64(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, (%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 192(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 192(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, 128(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm12, 320(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, 320(%rax)
 ; AVX512DQ-FAST-NEXT:    addq $40, %rsp
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
@@ -7981,82 +7973,84 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-ONLY-FAST-LABEL: store_i16_stride6_vf64:
 ; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $936, %rsp # imm = 0x3A8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    subq $1064, %rsp # imm = 0x428
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 64(%rcx), %xmm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rcx), %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm7, (%rsp) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm9, (%rsp) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %xmm8
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rcx), %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdx), %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm12[0],ymm11[0],ymm12[1],ymm11[1],ymm12[2],ymm11[2],ymm12[3],ymm11[3],ymm12[8],ymm11[8],ymm12[9],ymm11[9],ymm12[10],ymm11[10],ymm12[11],ymm11[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rcx), %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdx), %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm8, %xmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm29
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdx), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm11[0],ymm2[1],ymm11[1],ymm2[2],ymm11[2],ymm2[3],ymm11[3],ymm2[8],ymm11[8],ymm2[9],ymm11[9],ymm2[10],ymm11[10],ymm2[11],ymm11[11]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rcx), %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdx), %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm6, %xmm23
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm16
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rcx), %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm9[0],ymm14[0],ymm9[1],ymm14[1],ymm9[2],ymm14[2],ymm9[3],ymm14[3],ymm9[8],ymm14[8],ymm9[9],ymm14[9],ymm9[10],ymm14[10],ymm9[11],ymm14[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm26, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm25
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm15[0],ymm13[0],ymm15[1],ymm13[1],ymm15[2],ymm13[2],ymm15[3],ymm13[3],ymm15[8],ymm13[8],ymm15[9],ymm13[9],ymm15[10],ymm13[10],ymm15[11],ymm13[11]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm5, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdx), %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm6[0],ymm14[0],ymm6[1],ymm14[1],ymm6[2],ymm14[2],ymm6[3],ymm14[3],ymm6[8],ymm14[8],ymm6[9],ymm14[9],ymm6[10],ymm14[10],ymm6[11],ymm14[11]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm8, %xmm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm7, %xmm21
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm0[0],ymm4[0],ymm0[1],ymm4[1],ymm0[2],ymm4[2],ymm0[3],ymm4[3],ymm0[8],ymm4[8],ymm0[9],ymm4[9],ymm0[10],ymm4[10],ymm0[11],ymm4[11]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm9[4],xmm3[4],xmm9[5],xmm3[5],xmm9[6],xmm3[6],xmm9[7],xmm3[7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm0[1,1,1,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[1,1,1,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm0[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = <1,2,1,2,u,u,3,3,13,12,10,10,13,12,14,14>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm1, %zmm20, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm12 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <1,2,1,2,u,u,3,3,13,12,10,10,13,12,14,14>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm1, %zmm22, %zmm18
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm7, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    movw $18724, %ax # imm = 0x4924
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm18 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [8,9,20,11,12,21,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [8,9,20,11,12,21,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm1, %zmm21, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm1, %zmm17, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [0,9,2,3,8,5,6,11]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [0,9,2,3,8,5,6,11]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm1, %ymm22, %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm1, %ymm8, %ymm18
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, %xmm9
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm1[0,1,0,1]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm0
@@ -8064,406 +8058,405 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm0[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm1[2,2,2,2]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rsi), %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rsi), %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rsi), %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm0[0],ymm8[0],ymm0[1],ymm8[1],ymm0[2],ymm8[2],ymm0[3],ymm8[3],ymm0[8],ymm8[8],ymm0[9],ymm8[9],ymm0[10],ymm8[10],ymm0[11],ymm8[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm20, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm16 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r8), %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm6[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm1, %zmm21, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r8), %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm2, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm1, %ymm22, %ymm16
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r9), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm10, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r9), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm1[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [5,6,5,6,5,6,7,7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm12[4],ymm11[4],ymm12[5],ymm11[5],ymm12[6],ymm11[6],ymm12[7],ymm11[7],ymm12[12],ymm11[12],ymm12[13],ymm11[13],ymm12[14],ymm11[14],ymm12[15],ymm11[15]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm3, %ymm23, %ymm19
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm11[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm11 = ymm12[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm12[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm11[0],ymm3[0],ymm11[1],ymm3[1],ymm11[2],ymm3[2],ymm11[3],ymm3[3],ymm11[8],ymm3[8],ymm11[9],ymm3[9],ymm11[10],ymm3[10],ymm11[11],ymm3[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm3[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm8, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm27
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm10 = ymm10[0],ymm12[0],ymm10[1],ymm12[1],ymm10[2],ymm12[2],ymm10[3],ymm12[3],ymm10[8],ymm12[8],ymm10[9],ymm12[9],ymm10[10],ymm12[10],ymm10[11],ymm12[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm0[4],ymm8[4],ymm0[5],ymm8[5],ymm0[6],ymm8[6],ymm0[7],ymm8[7],ymm0[12],ymm8[12],ymm0[13],ymm8[13],ymm0[14],ymm8[14],ymm0[15],ymm8[15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[3,3,3,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm11, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm10, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm8, %zmm12 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vextracti64x4 $1, %zmm12, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,13,2,3,12,5,6,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm6, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm3, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm3, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm6[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [12,1,2,13,4,5,14,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm3, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[1,1,1,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rsi), %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%rdi), %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm15[0],ymm13[0],ymm15[1],ymm13[1],ymm15[2],ymm13[2],ymm15[3],ymm13[3],ymm15[8],ymm13[8],ymm15[9],ymm13[9],ymm15[10],ymm13[10],ymm15[11],ymm13[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm12, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm16, %zmm22, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm16 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r8), %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm3[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm12, %zmm17, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r8), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm0, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, %xmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm12, %ymm8, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r9), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm12[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r9), %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm12[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm9[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [5,6,5,6,5,6,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm9 = ymm2[4],ymm11[4],ymm2[5],ymm11[5],ymm2[6],ymm11[6],ymm2[7],ymm11[7],ymm2[12],ymm11[12],ymm2[13],ymm11[13],ymm2[14],ymm11[14],ymm2[15],ymm11[15]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm9, %ymm24, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm11 = ymm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm11[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm2 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm11[0],ymm2[1],ymm11[1],ymm2[2],ymm11[2],ymm2[3],ymm11[3],ymm2[8],ymm11[8],ymm2[9],ymm11[9],ymm2[10],ymm11[10],ymm2[11],ymm11[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm2[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm10[0],ymm0[0],ymm10[1],ymm0[1],ymm10[2],ymm0[2],ymm10[3],ymm0[3],ymm10[8],ymm0[8],ymm10[9],ymm0[9],ymm10[10],ymm0[10],ymm10[11],ymm0[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm10 = ymm15[4],ymm13[4],ymm15[5],ymm13[5],ymm15[6],ymm13[6],ymm15[7],ymm13[7],ymm15[12],ymm13[12],ymm15[13],ymm13[13],ymm15[14],ymm13[14],ymm15[15],ymm13[15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[3,3,3,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm11, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm9, %zmm25 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [8,21,10,11,20,13,14,23]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm25, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm0, %zmm20, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm3[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [12,1,2,13,4,5,14,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm2, %ymm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm12[u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm9[4],ymm14[4],ymm9[5],ymm14[5],ymm9[6],ymm14[6],ymm9[7],ymm14[7],ymm9[12],ymm14[12],ymm9[13],ymm14[13],ymm9[14],ymm14[14],ymm9[15],ymm14[15]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm0, %ymm23, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm14[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm6 = ymm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm9[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm6[0],ymm1[0],ymm6[1],ymm1[1],ymm6[2],ymm1[2],ymm6[3],ymm1[3],ymm6[8],ymm1[8],ymm6[9],ymm1[9],ymm6[10],ymm1[10],ymm6[11],ymm1[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rsi), %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm8, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm11, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm6[4],ymm14[4],ymm6[5],ymm14[5],ymm6[6],ymm14[6],ymm6[7],ymm14[7],ymm6[12],ymm14[12],ymm6[13],ymm14[13],ymm6[14],ymm14[14],ymm6[15],ymm14[15]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm0, %ymm24, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm3 = ymm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm14[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm6 = ymm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm6[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm6[0],ymm3[0],ymm6[1],ymm3[1],ymm6[2],ymm3[2],ymm6[3],ymm3[3],ymm6[8],ymm3[8],ymm6[9],ymm3[9],ymm6[10],ymm3[10],ymm6[11],ymm3[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rsi), %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm11, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm9
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm9[0],ymm6[0],ymm9[1],ymm6[1],ymm9[2],ymm6[2],ymm9[3],ymm6[3],ymm9[8],ymm6[8],ymm9[9],ymm6[9],ymm9[10],ymm6[10],ymm9[11],ymm6[11]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm9 = ymm11[4],ymm8[4],ymm11[5],ymm8[5],ymm11[6],ymm8[6],ymm11[7],ymm8[7],ymm11[12],ymm8[12],ymm11[13],ymm8[13],ymm11[14],ymm8[14],ymm11[15],ymm8[15]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm9 = ymm15[4],ymm11[4],ymm15[5],ymm11[5],ymm15[6],ymm11[6],ymm15[7],ymm11[7],ymm15[12],ymm11[12],ymm15[13],ymm11[13],ymm15[14],ymm11[14],ymm15[15],ymm11[15]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[3,3,3,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm6, %zmm14
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm14 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vextracti64x4 $1, %zmm14, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, %zmm29
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r8), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm0, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm1, %ymm10, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm0[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm1, %ymm3, %ymm14
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm30 = [1,0,2,2,1,0,2,2]
-; AVX512F-ONLY-FAST-NEXT:    # ymm30 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm17, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm29, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm9[0],xmm6[0],xmm9[1],xmm6[1],xmm9[2],xmm6[2],xmm9[3],xmm6[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm30, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm9 = xmm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm13, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm3, %zmm20, %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm0[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm3, %ymm10, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm19 = [1,0,2,2,1,0,2,2]
+; AVX512F-ONLY-FAST-NEXT:    # ymm19 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm23, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm3, %ymm19, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm9 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm9[0],xmm6[0],xmm9[1],xmm6[1],xmm9[2],xmm6[2],xmm9[3],xmm6[3]
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm6, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm5, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm7, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm7, %xmm10
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm5, %zmm29
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm1, %zmm30
 ; AVX512F-ONLY-FAST-NEXT:    movw $9362, %ax # imm = 0x2492
 ; AVX512F-ONLY-FAST-NEXT:    kmovw %eax, %k1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm29 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [16,9,10,17,12,13,18,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm29, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[2,1,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm1, %zmm17, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm26, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm25, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm30, %ymm19
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm5, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm30 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [16,9,10,17,12,13,18,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm30, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm1, %zmm23, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm31, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm19, %ymm31
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm5[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm3, %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rsi), %xmm7
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%rdi), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm7, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm7, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm19, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm6, %zmm26
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm5, %zmm26 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm26, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r8), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[2,1,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm2, %zmm17, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm31, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm3, %zmm31 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r8), %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm2[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm3, %zmm23, %zmm28
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[1,1,1,1]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm11[0],ymm8[0],ymm11[1],ymm8[1],ymm11[2],ymm8[2],ymm11[3],ymm8[3],ymm11[8],ymm8[8],ymm11[9],ymm8[9],ymm11[10],ymm8[10],ymm11[11],ymm8[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm31, %zmm20, %zmm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm31 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm15[0],ymm11[0],ymm15[1],ymm11[1],ymm15[2],ymm11[2],ymm15[3],ymm11[3],ymm15[8],ymm11[8],ymm15[9],ymm11[9],ymm15[10],ymm11[10],ymm15[11],ymm11[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm27, %zmm22, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm27 {%k2}
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm31, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm0, %zmm21, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm27, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm0, %zmm17, %zmm21
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm22, %ymm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm8
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[1,1,1,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm0[0],ymm5[0],ymm0[1],ymm5[1],ymm0[2],ymm5[2],ymm0[3],ymm5[3],ymm0[8],ymm5[8],ymm0[9],ymm5[9],ymm0[10],ymm5[10],ymm0[11],ymm5[11]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm28, %zmm20, %zmm20
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm20 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm7[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm1, %zmm20, %zmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm1, %ymm22, %ymm20
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm15[4],ymm13[4],ymm15[5],ymm13[5],ymm15[6],ymm13[6],ymm15[7],ymm13[7],ymm15[12],ymm13[12],ymm15[13],ymm13[13],ymm15[14],ymm13[14],ymm15[15],ymm13[15]
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm13 = ymm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm13[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm15[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm15[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm4[0],ymm13[0],ymm4[1],ymm13[1],ymm4[2],ymm13[2],ymm4[3],ymm13[3],ymm4[8],ymm13[8],ymm4[9],ymm13[9],ymm4[10],ymm13[10],ymm4[11],ymm13[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm5, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm13 = ymm15[0],ymm13[0],ymm15[1],ymm13[1],ymm15[2],ymm13[2],ymm15[3],ymm13[3],ymm15[8],ymm13[8],ymm15[9],ymm13[9],ymm15[10],ymm13[10],ymm15[11],ymm13[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm11[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm0[4],ymm5[4],ymm0[5],ymm5[5],ymm0[6],ymm5[6],ymm0[7],ymm5[7],ymm0[12],ymm5[12],ymm0[13],ymm5[13],ymm0[14],ymm5[14],ymm0[15],ymm5[15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm15[u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm23, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm2, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm8, %ymm27
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[1,1,1,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm15[0],ymm11[0],ymm15[1],ymm11[1],ymm15[2],ymm11[2],ymm15[3],ymm11[3],ymm15[8],ymm11[8],ymm15[9],ymm11[9],ymm15[10],ymm11[10],ymm15[11],ymm11[11]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm1[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm3 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm3 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm2[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm0, %zmm3, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm8, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm0 = ymm1[4],ymm4[4],ymm1[5],ymm4[5],ymm1[6],ymm4[6],ymm1[7],ymm4[7],ymm1[12],ymm4[12],ymm1[13],ymm4[13],ymm1[14],ymm4[14],ymm1[15],ymm4[15]
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm8 = ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm4[0],ymm8[0],ymm4[1],ymm8[1],ymm4[2],ymm8[2],ymm4[3],ymm8[3],ymm4[8],ymm8[8],ymm4[9],ymm8[9],ymm4[10],ymm8[10],ymm4[11],ymm8[11]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm15, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm22
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm8 = ymm13[0],ymm8[0],ymm13[1],ymm8[1],ymm13[2],ymm8[2],ymm13[3],ymm8[3],ymm13[8],ymm8[8],ymm13[9],ymm8[9],ymm13[10],ymm8[10],ymm13[11],ymm8[11]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm12[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm11 = ymm15[4],ymm11[4],ymm15[5],ymm11[5],ymm15[6],ymm11[6],ymm15[7],ymm11[7],ymm15[12],ymm11[12],ymm15[13],ymm11[13],ymm15[14],ymm11[14],ymm15[15],ymm11[15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm13[u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm12[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm0, %ymm24, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[3,3,3,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm13, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm1, %zmm4 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vextracti64x4 $1, %zmm4, %ymm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm27 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [0,13,2,3,12,5,6,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm1, %ymm22, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[3,3,3,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm8, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm1 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm2, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm8, %zmm20, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm8[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm13[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm8[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [12,1,2,13,4,5,14,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm2, %ymm6, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm6[4],ymm0[4],ymm6[5],ymm0[5],ymm6[6],ymm0[6],ymm6[7],ymm0[7],ymm6[12],ymm0[12],ymm6[13],ymm0[13],ymm6[14],ymm0[14],ymm6[15],ymm0[15]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm23, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm13 = ymm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm0[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm11 = ymm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm6[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm11 = ymm11[0],ymm13[0],ymm11[1],ymm13[1],ymm11[2],ymm13[2],ymm11[3],ymm13[3],ymm11[8],ymm13[8],ymm11[9],ymm13[9],ymm11[10],ymm13[10],ymm11[11],ymm13[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm9, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm3[0],ymm13[0],ymm3[1],ymm13[1],ymm3[2],ymm13[2],ymm3[3],ymm13[3],ymm3[8],ymm13[8],ymm3[9],ymm13[9],ymm3[10],ymm13[10],ymm3[11],ymm13[11]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 64(%r9), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm3, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm28 = ymm13[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm13 = ymm15[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm13[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm15 = ymm9[4],ymm6[4],ymm9[5],ymm6[5],ymm9[6],ymm6[6],ymm9[7],ymm6[7],ymm9[12],ymm6[12],ymm9[13],ymm6[13],ymm9[14],ymm6[14],ymm9[15],ymm6[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm6, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm24 = ymm9[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[3,3,3,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm11, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm11, %zmm1 {%k2}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm27, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm6, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vextracti64x4 $1, %zmm1, %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm0, %ymm22, %ymm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm2 = ymm0[4],ymm8[4],ymm0[5],ymm8[5],ymm0[6],ymm8[6],ymm0[7],ymm8[7],ymm0[12],ymm8[12],ymm0[13],ymm8[13],ymm0[14],ymm8[14],ymm0[15],ymm8[15]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm2, %ymm24, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %xmm11
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm11, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm15[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm15 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} ymm12 = ymm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm0[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm12 = ymm12[0],ymm15[0],ymm12[1],ymm15[1],ymm12[2],ymm15[2],ymm12[3],ymm15[3],ymm12[8],ymm15[8],ymm12[9],ymm15[9],ymm12[10],ymm15[10],ymm12[11],ymm15[11]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm0, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm8, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm13 = ymm13[0],ymm15[0],ymm13[1],ymm15[1],ymm13[2],ymm15[2],ymm13[3],ymm15[3],ymm13[8],ymm15[8],ymm13[9],ymm15[9],ymm13[10],ymm15[10],ymm13[11],ymm15[11]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm15 = ymm8[4],ymm0[4],ymm8[5],ymm0[5],ymm8[6],ymm0[6],ymm8[7],ymm0[7],ymm8[12],ymm0[12],ymm8[13],ymm0[13],ymm8[14],ymm0[14],ymm8[15],ymm0[15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm11 = ymm0[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,2]
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm7[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [12,1,2,13,4,5,14,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm2, %ymm7, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm6[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm2, %ymm7, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm6, %ymm27
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,2]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[3,3,3,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm12, %zmm12
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm13, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm12, %zmm2 {%k2}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm10, %ymm8, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm12, %zmm2, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm8[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm12, %ymm6, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm8, %ymm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm22
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm0, %ymm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm8, %ymm0, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsp), %xmm9 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm9[0],xmm6[0],xmm9[1],xmm6[1],xmm9[2],xmm6[2],xmm9[3],xmm6[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm0, %ymm30, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm9 = xmm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm0, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm10, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm8, %xmm15
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm15[4],xmm9[4],xmm15[5],xmm9[5],xmm15[6],xmm9[6],xmm15[7],xmm9[7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm2 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm13[2,1,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm7, %zmm17, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm9[0],xmm6[0],xmm9[1],xmm6[1],xmm9[2],xmm6[2],xmm9[3],xmm6[3]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm7, %ymm30, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm9 = xmm9[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm9 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm9 = zmm18[0,1,2,3],mem[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm15 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm9, %zmm18, %zmm15
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm9 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm9 = zmm16[0,1,2,3],mem[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm16 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm9, %zmm18, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm31[0,1,2,3],zmm19[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm28, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm9, %zmm18, %zmm19
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm20[0,1,2,3],zmm21[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm24, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm9, %zmm18, %zmm11
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm9 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm12[0,1,2,3],zmm9[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm12 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm9, %zmm18, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm9 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm14[0,1,2,3],zmm9[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm14 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm9, %zmm18, %zmm14
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm5[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm27[2,2,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm22[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm5, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm4, %zmm18, %zmm5
-; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm20 # 16-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # xmm20 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm8, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsp), %xmm10 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm0, %ymm19, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm12 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm13 = xmm10[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm12, %ymm12
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm4, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm7, %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm5, %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm13 = xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm5, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm0, %zmm5 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm6[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %zmm7, %zmm23, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm10[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm9, %ymm21
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm10, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm7, %ymm19, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm12 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpsrldq {{.*#+}} xmm13 = xmm10[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm18, %zmm13 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm13 = zmm18[0,1,2,3],mem[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm15 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm18, %zmm15
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm16, %zmm13 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm13 = zmm16[0,1,2,3],mem[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm16 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm18, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm27[0,1,2,3],zmm21[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm19 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm18, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm17[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm26, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm18, %zmm11
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm3 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm3 = zmm25[0,1,2,3],mem[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm13 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm14[0,1,2,3],zmm29[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm14 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm17, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm4[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm24[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm22[2,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm17, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm18 # 16-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # xmm18 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm12, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm7, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm8, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm9[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm12, %ymm21
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm9, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm29 = ymm9[0,0,2,1]
 ; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm22 # 16-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # xmm22 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm4, %zmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm3[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm8, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm8[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm7, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm8, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # ymm24 = mem[2,2,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq $230, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # ymm25 = mem[2,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm27 = xmm13[0],zero,xmm13[1],zero,xmm13[2],zero,xmm13[3],zero
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm7, %zmm4 {%k1}
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm6[2,1,3,3,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm7, %zmm4, %zmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm13[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm7, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm13, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm28 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm26 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa32 %zmm4, %zmm1 {%k1}
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm9[2,1,3,3,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm4, %zmm1, %zmm23
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm6[0,0,2,1,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm6, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm23, %zmm0, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm23[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm24, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm18, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,8,3,4,9,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm20, %ymm1, %ymm29
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm22, %ymm1, %ymm26
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm27, %ymm1, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm28, %ymm1, %ymm4
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm1 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm1 = zmm29[0,1,2,3],mem[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm21, %zmm10
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm6[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm6, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm27 = xmm9[0],zero,xmm9[1],zero,xmm9[2],zero,xmm9[3],zero
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm9, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,0,2,1,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq %xmm9, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,0,2,1]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm20[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm24, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm17, %zmm20
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,8,3,4,9,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm18, %ymm2, %ymm30
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm22, %ymm2, %ymm31
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm26, %ymm2, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vpermt2d %ymm27, %ymm2, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm30, %zmm2 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm2 = zmm30[0,1,2,3],mem[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm29, %zmm21, %zmm17
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm18, %zmm10
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm1 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm1 = zmm26[0,1,2,3],mem[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm8, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm18, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,2,3],zmm0[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm7, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm18, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm4[0,1,2,3],zmm17[0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm6, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm18, %zmm17
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm31[0,1,2,3],zmm28[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm18, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm5[0,1,2,3],zmm0[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm4, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm18, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm23[0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm9, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm18, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 192(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, 128(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm5, 320(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, (%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, 192(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm20, 128(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 320(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm11, 256(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm19, 448(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 384(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm10, 576(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm6, 384(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm17, 576(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm14, 512(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm12, 704(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm13, 704(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 640(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm15, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $936, %rsp # imm = 0x3A8
+; AVX512F-ONLY-FAST-NEXT:    addq $1064, %rsp # imm = 0x428
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
@@ -8965,467 +8958,461 @@ define void @store_i16_stride6_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512DQ-FAST-LABEL: store_i16_stride6_vf64:
 ; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $936, %rsp # imm = 0x3A8
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rcx), %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdx), %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rcx), %ymm12
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %ymm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 96(%rcx), %xmm19
-; AVX512DQ-FAST-NEXT:    vmovdqa64 96(%rdx), %xmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %xmm7
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %xmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rcx), %xmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm8
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovaps 32(%rdx), %xmm0
-; AVX512DQ-FAST-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    subq $920, %rsp # imm = 0x398
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rcx), %ymm8
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdx), %ymm6
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rcx), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rcx), %xmm7
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdx), %xmm14
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %xmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rcx), %xmm11
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %xmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm12, (%rsp) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdx), %xmm15
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm17, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm19, %xmm5
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm6[0],ymm12[0],ymm6[1],ymm12[1],ymm6[2],ymm12[2],ymm6[3],ymm12[3],ymm6[8],ymm12[8],ymm6[9],ymm12[9],ymm6[10],ymm12[10],ymm6[11],ymm12[11]
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, %xmm11
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm13[4],xmm0[5],xmm13[5],xmm0[6],xmm13[6],xmm0[7],xmm13[7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %ymm14
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %ymm15
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm15[0],ymm14[0],ymm15[1],ymm14[1],ymm15[2],ymm14[2],ymm15[3],ymm14[3],ymm15[8],ymm14[8],ymm15[9],ymm14[9],ymm15[10],ymm14[10],ymm15[11],ymm14[11]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm0[4],xmm9[4],xmm0[5],xmm9[5],xmm0[6],xmm9[6],xmm0[7],xmm9[7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm7
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rsi), %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm9, %ymm7
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm8
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm8[0],ymm7[0],ymm8[1],ymm7[1],ymm8[2],ymm7[2],ymm8[3],ymm7[3],ymm8[8],ymm7[8],ymm8[9],ymm7[9],ymm8[10],ymm7[10],ymm8[11],ymm7[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm0[4],ymm9[4],ymm0[5],ymm9[5],ymm0[6],ymm9[6],ymm0[7],ymm9[7],ymm0[12],ymm9[12],ymm0[13],ymm9[13],ymm0[14],ymm9[14],ymm0[15],ymm9[15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[3,3,3,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm31 = [5,6,5,6,5,6,7,7]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm9 = ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm9, %ymm31, %ymm9
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm3[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm20
-; AVX512DQ-FAST-NEXT:    movw $18724, %ax # imm = 0x4924
-; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm9, %zmm0, %zmm20 {%k1}
-; AVX512DQ-FAST-NEXT:    vextracti64x4 $1, %zmm20, %ymm21
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [0,13,2,3,12,5,6,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r8), %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm0
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm23, %ymm21
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [12,1,2,13,4,5,14,7]
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm4[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm22, %ymm20
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r9), %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm0
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm0
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rsi), %ymm3
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm6[0],ymm8[0],ymm6[1],ymm8[1],ymm6[2],ymm8[2],ymm6[3],ymm8[3],ymm6[8],ymm8[8],ymm6[9],ymm8[9],ymm6[10],ymm8[10],ymm6[11],ymm8[11]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm14[4],xmm7[4],xmm14[5],xmm7[5],xmm14[6],xmm7[6],xmm14[7],xmm7[7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm15[4],xmm11[4],xmm15[5],xmm11[5],xmm15[6],xmm11[6],xmm15[7],xmm11[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm15, %xmm28
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %ymm13
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %ymm2
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm2[0],ymm13[0],ymm2[1],ymm13[1],ymm2[2],ymm13[2],ymm2[3],ymm13[3],ymm2[8],ymm13[8],ymm2[9],ymm13[9],ymm2[10],ymm13[10],ymm2[11],ymm13[11]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm24
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm10, %xmm18
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm9, %zmm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rsi), %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <u,u,u,u,4,5,10,11,u,u,u,u,u,u,u,u,24,25,22,23,20,21,26,27,u,u,u,u,u,u,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm2, %ymm9
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm3, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm4, %ymm7
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm7[0],ymm0[0],ymm7[1],ymm0[1],ymm7[2],ymm0[2],ymm7[3],ymm0[3],ymm7[8],ymm0[8],ymm7[9],ymm0[9],ymm7[10],ymm0[10],ymm7[11],ymm0[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm7 = ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[3,3,3,3]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm6[4],ymm12[4],ymm6[5],ymm12[5],ymm6[6],ymm12[6],ymm6[7],ymm12[7],ymm6[12],ymm12[12],ymm6[13],ymm12[13],ymm6[14],ymm12[14],ymm6[15],ymm12[15]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm8, %ymm31, %ymm8
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm9 = ymm12[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm12[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm3, %ymm10
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm9 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[8],ymm9[8],ymm10[9],ymm9[9],ymm10[10],ymm9[10],ymm10[11],ymm9[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm10 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[3,3,3,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [5,6,5,6,5,6,7,7]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm12 = ymm6[4],ymm8[4],ymm6[5],ymm8[5],ymm6[6],ymm8[6],ymm6[7],ymm8[7],ymm6[12],ymm8[12],ymm6[13],ymm8[13],ymm6[14],ymm8[14],ymm6[15],ymm8[15]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm12, %ymm17, %ymm12
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm8 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm6 = ymm6[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm6[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[8],ymm9[8],ymm6[9],ymm9[9],ymm6[10],ymm9[10],ymm6[11],ymm9[11]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm6[0],ymm8[0],ymm6[1],ymm8[1],ymm6[2],ymm8[2],ymm6[3],ymm8[3],ymm6[8],ymm8[8],ymm6[9],ymm8[9],ymm6[10],ymm8[10],ymm6[11],ymm8[11]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm27
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm8, %zmm6, %zmm27 {%k1}
-; AVX512DQ-FAST-NEXT:    vextracti64x4 $1, %zmm27, %ymm28
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r8), %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm29
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm23, %ymm28
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm4[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm22, %ymm27
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r9), %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm30
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm21
+; AVX512DQ-FAST-NEXT:    movw $18724, %ax # imm = 0x4924
+; AVX512DQ-FAST-NEXT:    kmovw %eax, %k1
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm12, %zmm6, %zmm21 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [8,21,10,11,20,13,14,23]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, %zmm23
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r8), %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <u,u,u,u,u,u,u,u,14,15,14,15,14,15,14,15,28,29,26,27,26,27,30,31,30,31,30,31,30,31,30,31>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm2, %ymm6
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm6, %zmm20, %zmm23
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [12,1,2,13,4,5,14,7]
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm6, %ymm19, %ymm21
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r9), %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,16,17,20,21,20,21,22,23,24,25,24,25,24,25,24,25>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm6
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm6[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,u,u,u,u,u,u,8,9,10,11,12,13,14,15,24,25,28,29,28,29,26,27,24,25,26,27,28,29,30,31>
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm2, %ymm6
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm6[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rsi), %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm2, %ymm6
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm3, %ymm8
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm6 = ymm8[0],ymm6[0],ymm8[1],ymm6[1],ymm8[2],ymm6[2],ymm8[3],ymm6[3],ymm8[8],ymm6[8],ymm8[9],ymm6[9],ymm8[10],ymm6[10],ymm8[11],ymm6[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[3,3,3,3]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm9 = ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm9, %ymm17, %ymm9
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm10 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm0 = ymm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm0[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm10[0],ymm0[1],ymm10[1],ymm0[2],ymm10[2],ymm0[3],ymm10[3],ymm0[8],ymm10[8],ymm0[9],ymm10[9],ymm0[10],ymm10[10],ymm0[11],ymm10[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm27
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm9, %zmm0, %zmm27 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, %zmm29
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r8), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm0, %zmm20, %zmm29
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm1[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm19, %ymm27
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r9), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm31
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm18
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm4, %ymm22
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rsi), %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm6
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm3, %xmm7
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%rdi), %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm8
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm16 = [1,0,2,2,1,0,2,2]
-; AVX512DQ-FAST-NEXT:    # ymm16 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm17, %xmm4
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm8, %ymm16, %ymm8
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm3, %ymm3
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm7, %zmm24
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [1,0,2,2,1,0,2,2]
+; AVX512DQ-FAST-NEXT:    # ymm10 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm14, %xmm0
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm9 = xmm14[0],xmm7[0],xmm14[1],xmm7[1],xmm14[2],xmm7[2],xmm14[3],xmm7[3]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm9, %ymm10, %ymm9
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm14 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm7, %ymm7
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm8, %zmm25
 ; AVX512DQ-FAST-NEXT:    movw $9362, %ax # imm = 0x2492
 ; AVX512DQ-FAST-NEXT:    kmovw %eax, %k2
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm3, %zmm8, %zmm24 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [16,9,10,17,12,13,18,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, %zmm25
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm7, %zmm9, %zmm25 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [16,9,10,17,12,13,18,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm25, %zmm26
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r8), %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[2,1,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm3, %zmm17, %zmm25
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm0[2,1,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm6, %zmm16, %zmm26
 ; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rsi), %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm0, %xmm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa 64(%rdi), %xmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm1, %xmm5
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm11[0],xmm13[0],xmm11[1],xmm13[1],xmm11[2],xmm13[2],xmm11[3],xmm13[3]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm8, %ymm16, %ymm8
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm13 = xmm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm11 = xmm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm11 = xmm11[0],xmm13[0],xmm11[1],xmm13[1],xmm11[2],xmm13[2],xmm11[3],xmm13[3]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm11, %ymm13
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm5, %zmm11
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm13, %zmm8, %zmm11 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm26
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm14
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm14[4],xmm6[4],xmm14[5],xmm6[5],xmm14[6],xmm6[6],xmm14[7],xmm6[7]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm28, %xmm2
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm1, %ymm10, %ymm1
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm11[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm14, %zmm11
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm0, %zmm1, %zmm11 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm11, %zmm28
 ; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r8), %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[2,1,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm3, %zmm17, %zmm26
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm4
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm4, %ymm5
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm5[0],ymm3[0],ymm5[1],ymm3[1],ymm5[2],ymm3[2],ymm5[3],ymm3[3],ymm5[8],ymm3[8],ymm5[9],ymm3[9],ymm5[10],ymm3[10],ymm5[11],ymm3[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[3,3,3,3]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm15[4],ymm14[4],ymm15[5],ymm14[5],ymm15[6],ymm14[6],ymm15[7],ymm14[7],ymm15[12],ymm14[12],ymm15[13],ymm14[13],ymm15[14],ymm14[14],ymm15[15],ymm14[15]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm8, %ymm31, %ymm8
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm1 = ymm14[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm14[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm2 = ymm15[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm15[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm0
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm8, %zmm1, %zmm0 {%k1}
-; AVX512DQ-FAST-NEXT:    vextracti64x4 $1, %zmm0, %ymm9
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %ymm2
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,1,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm0, %zmm16, %zmm28
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm13
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm2, %ymm1
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm23, %ymm9
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm2[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm22, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm14
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm2, %ymm1
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm1[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm15
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm2, %ymm1
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm1[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %ymm5
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm5, %ymm3
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm2, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm3, %ymm1
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm1 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[3,3,3,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm2
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm13[4],ymm2[5],ymm13[5],ymm2[6],ymm13[6],ymm2[7],ymm13[7],ymm2[12],ymm13[12],ymm2[13],ymm13[13],ymm2[14],ymm13[14],ymm2[15],ymm13[15]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm5, %ymm17, %ymm5
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm13[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm13[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm6 = ymm2[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm2[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm6[0],ymm4[0],ymm6[1],ymm4[1],ymm6[2],ymm4[2],ymm6[3],ymm4[3],ymm6[8],ymm4[8],ymm6[9],ymm4[9],ymm6[10],ymm4[10],ymm6[11],ymm4[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm13
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm5, %zmm4, %zmm13 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm0, %zmm20, %zmm4
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm1[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm0, %ymm19, %ymm13
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm14
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm0[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm9
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm24 = ymm0[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %ymm3
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm3, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm10, %ymm2, %ymm6
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm3 = ymm6[0],ymm3[0],ymm6[1],ymm3[1],ymm6[2],ymm3[2],ymm6[3],ymm3[3],ymm6[8],ymm3[8],ymm6[9],ymm3[9],ymm6[10],ymm3[10],ymm6[11],ymm3[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm2[4],ymm5[4],ymm2[5],ymm5[5],ymm2[6],ymm5[6],ymm2[7],ymm5[7],ymm2[12],ymm5[12],ymm2[13],ymm5[13],ymm2[14],ymm5[14],ymm2[15],ymm5[15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[3,3,3,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm2, %ymm5
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[8],ymm0[8],ymm5[9],ymm0[9],ymm5[10],ymm0[10],ymm5[11],ymm0[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[3,3,3,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu (%rsp), %ymm4 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm8 = ymm1[4],ymm4[4],ymm1[5],ymm4[5],ymm1[6],ymm4[6],ymm1[7],ymm4[7],ymm1[12],ymm4[12],ymm1[13],ymm4[13],ymm1[14],ymm4[14],ymm1[15],ymm4[15]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm8, %ymm31, %ymm8
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm10 = ymm4[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm4[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm4 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm4 = ymm4[0],ymm10[0],ymm4[1],ymm10[1],ymm4[2],ymm10[2],ymm4[3],ymm10[3],ymm4[8],ymm10[8],ymm4[9],ymm10[9],ymm4[10],ymm10[10],ymm4[11],ymm10[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm3, %zmm31
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm8, %zmm4, %zmm31 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm7
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm13, %ymm7, %ymm4
-; AVX512DQ-FAST-NEXT:    vextracti64x4 $1, %zmm31, %ymm6
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm4, %ymm23, %ymm6
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm7[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm4, %ymm22, %ymm31
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} ymm6 = ymm8[4],ymm1[4],ymm8[5],ymm1[5],ymm8[6],ymm1[6],ymm8[7],ymm1[7],ymm8[12],ymm1[12],ymm8[13],ymm1[13],ymm8[14],ymm1[14],ymm8[15],ymm1[15]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm6, %ymm17, %ymm6
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm7 = ymm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm1[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} ymm15 = ymm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,ymm8[22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm7 = ymm15[0],ymm7[0],ymm15[1],ymm7[1],ymm15[2],ymm7[2],ymm15[3],ymm7[3],ymm15[8],ymm7[8],ymm15[9],ymm7[9],ymm15[10],ymm7[10],ymm15[11],ymm7[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm6, %zmm7, %zmm0 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm15
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm15, %ymm5
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm5, %zmm0, %zmm20
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm5 = ymm15[2,1,3,3,4,5,6,7,10,9,11,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm5, %ymm19, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm4
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm23 = ymm4[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm15, %ymm1, %ymm4
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm18 = ymm4[2,1,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %xmm14
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm14, %xmm8
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm4, %xmm10
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm8[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm13, %ymm16, %ymm13
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm3[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm15 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm8, %ymm8
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm22, %zmm10, %zmm10
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm8, %zmm13, %zmm10 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm10, %zmm22
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm5
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm5[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm1, %ymm5
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm5[2,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm6, %xmm7
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm5, %xmm14
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm14[4],xmm7[4],xmm14[5],xmm7[5],xmm14[6],xmm7[6],xmm14[7],xmm7[7]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm7[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm14 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm18, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rsp), %xmm8 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm12, %ymm10, %ymm12
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm9 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm9[0],xmm7[0],xmm9[1],xmm7[1],xmm9[2],xmm7[2],xmm9[3],xmm7[3]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm7, %ymm7
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm14, %zmm17
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm7, %zmm12, %zmm17 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm17, %zmm18
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %xmm1
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm1[2,1,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm1, %xmm29
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm13, %zmm17, %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %xmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm15
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm13, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm15, %xmm12
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm1[2,1,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm1, %xmm30
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm9, %zmm16, %zmm18
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %xmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm14
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,2,3,4,5,6,7,8,9,6,7,4,5,10,11]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm9, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm7, %xmm14, %xmm12
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm12[4],xmm1[4],xmm12[5],xmm1[5],xmm12[6],xmm1[6],xmm12[7],xmm1[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm1[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm12, %ymm16, %ymm12
-; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm1[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm12, %ymm10, %ymm12
+; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm10 = xmm7[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FAST-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm8[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm8, %zmm16
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm12, %zmm16 {%k2}
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %xmm3
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm3[2,1,3,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm1, %zmm16, %zmm17
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm15[4],xmm13[4],xmm15[5],xmm13[5],xmm15[6],xmm13[6],xmm15[7],xmm13[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm1[1,1,1,1]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm5[0],ymm2[1],ymm5[1],ymm2[2],ymm5[2],ymm2[3],ymm5[3],ymm2[8],ymm5[8],ymm2[9],ymm5[9],ymm2[10],ymm5[10],ymm2[11],ymm5[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm1[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm10 = xmm14[0],xmm9[0],xmm14[1],xmm9[1],xmm14[2],xmm9[2],xmm14[3],xmm9[3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm8, %ymm8
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm10, %zmm10
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm8, %zmm12, %zmm10 {%k2}
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %xmm12
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm12[2,1,3,3,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm1, %zmm10, %zmm16
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm14[4],xmm9[4],xmm14[5],xmm9[5],xmm14[6],xmm9[6],xmm14[7],xmm9[7]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm1[1,1,1,1]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm1[2,2,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # xmm1 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm1[1,1,1,1]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm1[1,1,1,1]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # ymm1 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[2],mem[2],ymm1[3],mem[3],ymm1[8],mem[8],ymm1[9],mem[9],ymm1[10],mem[10],ymm1[11],mem[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm1[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm1[2,2,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # xmm1 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm1[1,1,1,1]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm1[1,1,1,1]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm1 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[2],mem[2],ymm1[3],mem[3],ymm1[8],mem[8],ymm1[9],mem[9],ymm1[10],mem[10],ymm1[11],mem[11]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm20[0,1,2,3],zmm2[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm20 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm20, %zmm20 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm21, %zmm20
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm28, %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm27[0,1,2,3],zmm2[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm27, %zmm27 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm21, %zmm27
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm2[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm30, %zmm19
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm0, %zmm21, %zmm19
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm31[0,1,2,3],zmm0[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm23, %zmm18
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm6, %zmm21, %zmm18
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm3 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm3 = ymm1[0],mem[0],ymm1[1],mem[1],ymm1[2],mem[2],ymm1[3],mem[3],ymm1[8],mem[8],ymm1[9],mem[9],ymm1[10],mem[10],ymm1[11],mem[11]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm21[0,1,2,3],zmm23[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm21 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm23, %zmm1, %zmm21
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm27 = zmm27[0,1,2,3],zmm29[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm23 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm27, %zmm1, %zmm23
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],zmm4[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm31, %zmm24
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm1, %zmm24
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm0[0,1,2,3],zmm20[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm22, %zmm19
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm13, %zmm1, %zmm19
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [0,1,8,3,4,9,6,7]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm6, %ymm9, %ymm24
-; AVX512DQ-FAST-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # xmm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm6, %ymm9, %ymm11
-; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm6 = xmm29[0],zero,xmm29[1],zero,xmm29[2],zero,xmm29[3],zero
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm6, %ymm9, %ymm10
-; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm6 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm6, %ymm9, %ymm16
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = <1,2,1,2,u,u,3,3,13,12,10,10,13,12,14,14>
-; AVX512DQ-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm9 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm5, %zmm8, %zmm9 {%k1}
-; AVX512DQ-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm5 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm15, %zmm13, %zmm5 {%k1}
-; AVX512DQ-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm8 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm1, %zmm12, %zmm8 {%k1}
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm7[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [0,1,8,3,4,9,6,7]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm13, %ymm25
+; AVX512DQ-FAST-NEXT:    vpmovzxwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm13, %ymm11
+; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm30[0],zero,xmm30[1],zero,xmm30[2],zero,xmm30[3],zero
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm13, %ymm17
+; AVX512DQ-FAST-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm12[0],zero,xmm12[1],zero,xmm12[2],zero,xmm12[3],zero
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm13, %ymm10
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <1,2,1,2,u,u,3,3,13,12,10,10,13,12,14,14>
+; AVX512DQ-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm13 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm14, %zmm9, %zmm13 {%k1}
+; AVX512DQ-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm9 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm2, %zmm8, %zmm9 {%k1}
+; AVX512DQ-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm2 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm3, %zmm7, %zmm2 {%k1}
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm15[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [8,9,20,11,12,21,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm12
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm1, %zmm7, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [0,9,2,3,8,5,6,11]
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm3, %ymm21, %ymm9
-; AVX512DQ-FAST-NEXT:    vpshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm3 = mem[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, %zmm15
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm3, %zmm7, %zmm15
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm24[0,1,2,3],zmm25[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm1, %xmm25
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm2, %ymm21, %ymm5
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm13, %zmm8
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm3, %zmm7, %zmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [12,13,10,11,10,11,14,15,14,15,14,15,14,15,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm12, %xmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [0,9,2,3,8,5,6,11]
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm12, %ymm20, %ymm13
+; AVX512DQ-FAST-NEXT:    vpshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm12 = mem[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, %zmm15
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm12, %zmm7, %zmm15
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm12 = zmm25[0,1,2,3],zmm26[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm3, %xmm26
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm4, %ymm20, %ymm9
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm4 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm4 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11]
-; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r9), %xmm14
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm14[0,0,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm5 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm5 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[2],mem[2],ymm0[3],mem[3],ymm0[8],mem[8],ymm0[9],mem[9],ymm0[10],mem[10],ymm0[11],mem[11]
+; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r9), %xmm6
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[0,0,2,1,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm14, %xmm1
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm3, %zmm24, %zmm28
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm11[0,1,2,3],zmm26[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[1,1,1,1]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm4, %zmm2, %zmm6 {%k1}
-; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r9), %xmm2
-; AVX512DQ-FAST-NEXT:    vpshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm4 = mem[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm8, %zmm11
-; AVX512DQ-FAST-NEXT:    vpermt2d %zmm4, %zmm7, %zmm11
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm2[0,0,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm4, %ymm4
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm2, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = [0,1,4,5,4,5,6,7,8,9,8,9,8,9,8,9]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm6, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm27
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm12, %zmm25, %zmm27
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm11[0,1,2,3],zmm28[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpermd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[1,1,1,1]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti32x8 $1, %ymm5, %zmm4, %zmm1 {%k1}
+; AVX512DQ-FAST-NEXT:    vmovdqa 64(%r9), %xmm4
+; AVX512DQ-FAST-NEXT:    vpshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm5 = mem[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, %zmm12
+; AVX512DQ-FAST-NEXT:    vpermt2d %zmm5, %zmm7, %zmm12
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm4[0,0,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm5, %ymm5
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm11
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm5, %zmm5
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm25, %xmm26
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm25, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm4
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm4, %ymm21, %ymm8
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %xmm4
-; AVX512DQ-FAST-NEXT:    vpshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm1 = mem[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm1, %zmm6, %zmm7
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[0,0,2,1,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm1, %ymm25
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm29, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm26, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm26, %xmm28
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm26, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm11
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm11, %ymm20, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %xmm11
+; AVX512DQ-FAST-NEXT:    vpshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm3 = mem[0,1,1,3,4,5,6,7,8,9,9,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm3, %zmm1, %zmm7
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm11[0,0,2,1,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm3, %ymm26
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm30, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm28, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %xmm0
-; AVX512DQ-FAST-NEXT:    vpermt2d %ymm1, %ymm21, %ymm6
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm4, %xmm1
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm21 = ymm1[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm0, %xmm13
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm14, %xmm14
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm1
+; AVX512DQ-FAST-NEXT:    vpermt2d %ymm3, %ymm20, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm3
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm3[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm0, %xmm14
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = [8,9,12,13,12,13,10,11,8,9,10,11,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm6, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm4, %xmm4
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm11, %xmm11
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm0, %xmm3
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastq %xmm0, %ymm0
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm1[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshuflw $96, (%rsp), %ymm1 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm1 = mem[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm29 = ymm14[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm14 = mem[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm2[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm2 = mem[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm4[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,2,1]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm28 = ymm3[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm3 = mem[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm29 = ymm6[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm6 = mem[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm4[0,1,0,1]
 ; AVX512DQ-FAST-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # ymm4 = mem[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,2]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm23, %zmm24, %zmm3
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm10[0,1,2,3],zmm22[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm25, %zmm21
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm10, %zmm24, %zmm21
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm16[0,1,2,3],zmm17[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm10, %zmm24, %zmm0
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm9 = zmm9[0,1,2,3],zmm12[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm26, %zmm1
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm11[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpshuflw $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm11 = mem[0,0,2,1,4,5,6,7,8,8,10,9,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,2,2]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm22, %zmm25, %zmm5
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm17 = zmm17[0,1,2,3],zmm18[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm26, %zmm18
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm17, %zmm25, %zmm18
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm10 = zmm10[0,1,2,3],zmm16[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm10, %zmm25, %zmm0
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm13[0,1,2,3],zmm8[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm28, %zmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm9, %zmm10, %zmm1
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm15[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm29, %zmm9
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm10, %zmm9
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm11[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm30, %zmm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm10, %zmm2
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm6[0,1,2,3],zmm7[0,1,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm31, %zmm4
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm5, %zmm10, %zmm4
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm8, %zmm10, %zmm3
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm9[0,1,2,3],zmm15[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm29, %zmm6
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm8, %zmm10, %zmm6
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm12[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm30, %zmm4
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm10, %zmm4
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm7[0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm31, %zmm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm1, %zmm10, %zmm2
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 256(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 448(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm9, 640(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 64(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, 256(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 448(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm6, 640(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, 64(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, 192(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, 128(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, 320(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, 384(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm28, 576(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, 512(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm20, 704(%rax)
-; AVX512DQ-FAST-NEXT:    addq $936, %rsp # imm = 0x3A8
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, 192(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm19, 128(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, 320(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm5, 384(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm27, 576(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, 512(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, 704(%rax)
+; AVX512DQ-FAST-NEXT:    addq $920, %rsp # imm = 0x398
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;

diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
index 21224a088db94..bcabcd9da92ab 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
@@ -750,15 +750,9 @@ define void @vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4(ptr %in.
 ; AVX512BW-SLOW-LABEL: vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4:
 ; AVX512BW-SLOW:       # %bb.0:
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,9,0,11,0,13,0,15]
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,9,0,11,4,5,6,7]
-; AVX512BW-SLOW-NEXT:    vpermw %zmm0, %zmm1, %zmm1
-; AVX512BW-SLOW-NEXT:    vmovd %xmm0, %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %eax, %xmm1, %xmm1
-; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm0[5],xmm1[6,7]
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6],xmm0[7]
+; AVX512BW-SLOW-NEXT:    vpermw %zmm0, %zmm1, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
@@ -767,16 +761,13 @@ define void @vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4(ptr %in.
 ; AVX512BW-FAST-LABEL: vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4:
 ; AVX512BW-FAST:       # %bb.0:
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,9,0,11,0,13,6,7]
 ; AVX512BW-FAST-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,9,0,3,4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vpermw %zmm0, %zmm1, %zmm1
-; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vmovd %xmm0, %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %eax, %xmm1, %xmm0
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm2[5],xmm0[6,7]
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7]
+; AVX512BW-FAST-NEXT:    vpinsrw $6, %eax, %xmm1, %xmm1
+; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5,6],xmm0[7]
 ; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
@@ -877,8 +868,8 @@ define void @vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2(ptr %in.
 ; AVX512BW-SLOW-LABEL: vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2:
 ; AVX512BW-SLOW:       # %bb.0:
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512BW-SLOW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,9,10,11,0,13,6,7]
+; AVX512BW-SLOW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vpermw %zmm0, %zmm1, %zmm1
 ; AVX512BW-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512BW-SLOW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
@@ -890,8 +881,8 @@ define void @vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2(ptr %in.
 ; AVX512BW-FAST-LABEL: vec128_i16_widen_to_i64_factor4_broadcast_to_v2i64_factor2:
 ; AVX512BW-FAST:       # %bb.0:
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm0
-; AVX512BW-FAST-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,9,10,11,0,5,6,7]
+; AVX512BW-FAST-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vpermw %zmm0, %zmm1, %zmm1
 ; AVX512BW-FAST-NEXT:    vextracti128 $1, %ymm0, %xmm0
 ; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4],xmm0[5,6,7]
@@ -2040,11 +2031,10 @@ define void @vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4(ptr %in.
 ; AVX512BW-FAST-LABEL: vec256_i32_widen_to_i64_factor2_broadcast_to_v4i64_factor4:
 ; AVX512BW-FAST:       # %bb.0:
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,25,0,27,0,29,0,31]
 ; AVX512BW-FAST-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,9,0,11,0,13,0,15]
-; AVX512BW-FAST-NEXT:    vpermi2d %ymm1, %ymm0, %ymm2
-; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm2, %zmm0
+; AVX512BW-FAST-NEXT:    vpermt2d %zmm0, %zmm1, %zmm0
+; AVX512BW-FAST-NEXT:    vpaddb (%rdx), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rcx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
 ; AVX512BW-FAST-NEXT:    retq

diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
index fed93faa47b3f..88051de876c09 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast_from_memory.ll
@@ -643,14 +643,8 @@ define void @vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4(ptr %in.
 ;
 ; AVX512BW-SLOW-LABEL: vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4:
 ; AVX512BW-SLOW:       # %bb.0:
-; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,9,0,11,4,5,6,7]
+; AVX512BW-SLOW-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,9,0,11,0,13,0,15]
 ; AVX512BW-SLOW-NEXT:    vpermw (%rdi), %zmm0, %zmm0
-; AVX512BW-SLOW-NEXT:    movl (%rdi), %eax
-; AVX512BW-SLOW-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5],xmm0[6,7]
-; AVX512BW-SLOW-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512BW-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
 ; AVX512BW-SLOW-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-SLOW-NEXT:    vmovdqa64 %zmm0, (%rdx)
 ; AVX512BW-SLOW-NEXT:    vzeroupper
@@ -658,15 +652,10 @@ define void @vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4(ptr %in.
 ;
 ; AVX512BW-FAST-LABEL: vec128_i16_widen_to_i32_factor2_broadcast_to_v4i32_factor4:
 ; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,9,0,3,4,5,6,7]
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,9,0,11,0,13,6,7]
 ; AVX512BW-FAST-NEXT:    vpermw (%rdi), %zmm0, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3],xmm0[4,5,6,7]
-; AVX512BW-FAST-NEXT:    movl (%rdi), %eax
-; AVX512BW-FAST-NEXT:    vpinsrw $4, %eax, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5],xmm0[6,7]
-; AVX512BW-FAST-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm1[7]
+; AVX512BW-FAST-NEXT:    vpinsrw $6, (%rdi), %xmm0, %xmm0
+; AVX512BW-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],mem[7]
 ; AVX512BW-FAST-NEXT:    vpaddb (%rsi), %zmm0, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rdx)
 ; AVX512BW-FAST-NEXT:    vzeroupper
