[llvm] 59d14b6 - [X86] combineLoad - try to reuse existing constant pool entries for smaller vector constant data (REAPPLIED)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 20 07:45:03 PST 2023


Author: Simon Pilgrim
Date: 2023-11-20T15:38:48Z
New Revision: 59d14b623357d9b0514a9575e6d92474694b2f74

URL: https://github.com/llvm/llvm-project/commit/59d14b623357d9b0514a9575e6d92474694b2f74
DIFF: https://github.com/llvm/llvm-project/commit/59d14b623357d9b0514a9575e6d92474694b2f74.diff

LOG: [X86] combineLoad - try to reuse existing constant pool entries for smaller vector constant data (REAPPLIED)

If we already have a YMM/ZMM constant whose lower bits match a smaller XMM/YMM constant, ensure we reuse the same (wider) constant pool entry rather than emitting a separate smaller one.

This extends the similar combines we already have that reuse VBROADCAST_LOAD/SUBV_BROADCAST_LOAD constant loads.

This is mainly a canonicalization, but it should make it easier for us to merge constant loads in a future commit (related to both #70947 and better X86FixupVectorConstantsPass usage for #71078).

Reapplied with a fix to ensure we don't 'flip-flop' between multiple matching constants: only perform the fold if the new constant pool entry is larger than the current entry.
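
As an illustrative sketch only (hypothetical IR, not taken from the patch's test suite), this is the
kind of pattern the fold targets: a <2 x i64> constant whose bits match the low 128 bits of a
<4 x i64> constant used in the same function. After the combine, the narrower value can be taken as
the lowest subvector of the wider constant load, so both uses share a single constant pool entry;
whether the fold actually fires depends on how each constant ends up being lowered.

  define <2 x i64> @reuse_low_bits(<4 x i64> %a, <2 x i64> %b, ptr %p) {
    ; The wide op keeps the YMM constant <0,1,2,3> alive in the constant pool.
    %wide = add <4 x i64> %a, <i64 0, i64 1, i64 2, i64 3>
    store <4 x i64> %wide, ptr %p
    ; The XMM constant <0,1> matches its low 128 bits, so it should no longer
    ; need a separate (smaller) constant pool entry.
    %narrow = add <2 x i64> %b, <i64 0, i64 1>
    ret <2 x i64> %narrow
  }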

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index fbd241a9d8ec07c..7a4fa16edb7de49 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -49875,8 +49875,8 @@ static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
     }
   }
 
-  // If we also broadcast this to a wider type, then just extract the lowest
-  // subvector.
+  // If we also load/broadcast this to a wider type, then just extract the
+  // lowest subvector.
   if (Ext == ISD::NON_EXTLOAD && Subtarget.hasAVX() && Ld->isSimple() &&
       (RegVT.is128BitVector() || RegVT.is256BitVector())) {
     SDValue Ptr = Ld->getBasePtr();
@@ -49884,8 +49884,9 @@ static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
     for (SDNode *User : Chain->uses()) {
       if (User != N &&
           (User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD ||
-           User->getOpcode() == X86ISD::VBROADCAST_LOAD) &&
-          cast<MemIntrinsicSDNode>(User)->getChain() == Chain &&
+           User->getOpcode() == X86ISD::VBROADCAST_LOAD ||
+           ISD::isNormalLoad(User)) &&
+          cast<MemSDNode>(User)->getChain() == Chain &&
           !User->hasAnyUseOfValue(1) &&
           User->getValueSizeInBits(0).getFixedValue() >
               RegVT.getFixedSizeInBits()) {
@@ -49916,6 +49917,31 @@ static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
             }
           }
         }
+        if (ISD::isNormalLoad(User)) {
+          // See if we are loading a constant that matches in the lower
+          // bits of a longer constant (but from a different constant pool ptr).
+          SDValue UserPtr = cast<LoadSDNode>(User)->getBasePtr();
+          const Constant *LdC = getTargetConstantFromBasePtr(Ptr);
+          const Constant *UserC = getTargetConstantFromBasePtr(UserPtr);
+          if (LdC && UserC && UserPtr != Ptr &&
+              LdC->getType()->getPrimitiveSizeInBits() <
+                  UserC->getType()->getPrimitiveSizeInBits()) {
+            APInt Undefs, UserUndefs;
+            SmallVector<APInt> Bits, UserBits;
+            if (getTargetConstantBitsFromNode(SDValue(N, 0), 8, Undefs, Bits) &&
+                getTargetConstantBitsFromNode(SDValue(User, 0), 8, UserUndefs,
+                                              UserBits)) {
+              UserUndefs = UserUndefs.trunc(Undefs.getBitWidth());
+              UserBits.truncate(Bits.size());
+              if (Bits == UserBits && UserUndefs.isSubsetOf(Undefs)) {
+                SDValue Extract = extractSubVector(
+                    SDValue(User, 0), 0, DAG, SDLoc(N), RegVT.getSizeInBits());
+                Extract = DAG.getBitcast(RegVT, Extract);
+                return DCI.CombineTo(N, Extract, SDValue(User, 1));
+              }
+            }
+          }
+        }
       }
     }
   }

diff --git a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
index 083269b312a45d3..b69d22e04d7deb7 100644
--- a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
+++ b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
@@ -1400,7 +1400,7 @@ define <4 x i64> @f4xi64_i128(<4 x i64> %a) {
 ; AVX-64-LABEL: f4xi64_i128:
 ; AVX-64:       # %bb.0:
 ; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
+; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1]
 ; AVX-64-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
 ; AVX-64-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
 ; AVX-64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -1535,7 +1535,7 @@ define <8 x i64> @f8xi64_i256(<8 x i64> %a) {
 ; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm3 = [2,3]
 ; AVX-64-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0]
+; AVX-64-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1]
 ; AVX-64-NEXT:    vpaddq %xmm4, %xmm1, %xmm1
 ; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm2

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
index 0771fcea0714cda..39409afcec25137 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
@@ -2157,7 +2157,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm10[2,3,0,1]
 ; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3],ymm10[4,5,6,7,8,9,10],ymm11[11],ymm10[12,13,14,15]
 ; AVX2-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm10[2,3,2,3,2,3,2,3,8,9,8,9,6,7,4,5,18,19,18,19,18,19,18,19,24,25,24,25,22,23,20,21]
-; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
+; AVX2-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0>
 ; AVX2-SLOW-NEXT:    vpblendvb %ymm10, %ymm8, %ymm11, %ymm8
 ; AVX2-SLOW-NEXT:    vpblendd {{.*#+}} ymm11 = ymm5[0,1],ymm6[2],ymm5[3,4,5],ymm6[6],ymm5[7]
 ; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm11, %xmm12
@@ -2329,7 +2329,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = <2,5,1,u,4,u,u,u>
 ; AVX2-FAST-NEXT:    vpermd %ymm11, %ymm12, %ymm11
 ; AVX2-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm11[2,3,2,3,2,3,2,3,8,9,0,1,6,7,8,9,18,19,18,19,18,19,18,19,24,25,16,17,22,23,24,25]
-; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
+; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0>
 ; AVX2-FAST-NEXT:    vpblendvb %ymm11, %ymm10, %ymm12, %ymm10
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm12 = ymm4[0,1],ymm6[2],ymm4[3,4,5],ymm6[6],ymm4[7]
 ; AVX2-FAST-NEXT:    vextracti128 $1, %ymm12, %xmm13
@@ -2496,7 +2496,7 @@ define void @load_i16_stride7_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX2-FAST-PERLANE-NEXT:    vpermq {{.*#+}} ymm12 = ymm11[2,3,0,1]
 ; AVX2-FAST-PERLANE-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3],ymm11[4,5,6,7,8,9,10],ymm12[11],ymm11[12,13,14,15]
 ; AVX2-FAST-PERLANE-NEXT:    vpshufb {{.*#+}} ymm12 = ymm11[2,3,2,3,2,3,2,3,8,9,8,9,6,7,4,5,18,19,18,19,18,19,18,19,24,25,24,25,22,23,20,21]
-; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm11 = [255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0]
+; AVX2-FAST-PERLANE-NEXT:    vmovdqa {{.*#+}} xmm11 = <255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0>
 ; AVX2-FAST-PERLANE-NEXT:    vpblendvb %ymm11, %ymm8, %ymm12, %ymm8
 ; AVX2-FAST-PERLANE-NEXT:    vpblendd {{.*#+}} ymm12 = ymm5[0,1],ymm6[2],ymm5[3,4,5],ymm6[6],ymm5[7]
 ; AVX2-FAST-PERLANE-NEXT:    vextracti128 $1, %ymm12, %xmm13

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
index 29e3247e1451a5f..65c1c9067623522 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-5.ll
@@ -1685,7 +1685,7 @@ define void @load_i8_stride5_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
 ; AVX2-ONLY-NEXT:    # ymm10 = mem[0,1,0,1]
 ; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm7, %ymm8, %ymm7
 ; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,1,6,11,16,21,26,31,20,25,30,19,24,29,u,u,u,u,u,u]
-; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm10 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0]
+; AVX2-ONLY-NEXT:    vmovdqa {{.*#+}} xmm10 = <255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0>
 ; AVX2-ONLY-NEXT:    vpblendvb %ymm10, %ymm6, %ymm7, %ymm6
 ; AVX2-ONLY-NEXT:    vmovdqa 144(%rdi), %xmm7
 ; AVX2-ONLY-NEXT:    vpshufb {{.*#+}} xmm11 = xmm7[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm7[1,6,11]

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
index f91dd72bfe3ff5a..b12edef01085f3e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
@@ -1238,13 +1238,12 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm3 = zmm3[0,1,2,3],zmm6[4,5,6,7]
 ; AVX512F-NEXT:    vmovdqa (%rdx), %ymm6
 ; AVX512F-NEXT:    vmovdqa 32(%rdx), %ymm7
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = <5,5,u,6,6,u,7,7>
-; AVX512F-NEXT:    vpermd %ymm7, %ymm9, %ymm9
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm10 = [0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0]
-; AVX512F-NEXT:    vpandn %ymm9, %ymm10, %ymm9
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm10 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
-; AVX512F-NEXT:    vpshufb %ymm10, %ymm7, %ymm7
-; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
+; AVX512F-NEXT:    vpshufb %ymm9, %ymm7, %ymm10
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm11 = <5,5,u,6,6,u,7,7>
+; AVX512F-NEXT:    vpermd %ymm7, %ymm11, %ymm7
+; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm7, %zmm10, %zmm7
 ; AVX512F-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm7
 ; AVX512F-NEXT:    vmovdqa (%rdi), %ymm3
 ; AVX512F-NEXT:    vpshufb %ymm5, %ymm3, %ymm3
@@ -1259,7 +1258,7 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
 ; AVX512F-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm0
 ; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm3[4,5,6,7]
-; AVX512F-NEXT:    vpshufb %ymm10, %ymm6, %ymm1
+; AVX512F-NEXT:    vpshufb %ymm9, %ymm6, %ymm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,0,0,u,1,1,u,2>
 ; AVX512F-NEXT:    vpermd %ymm6, %ymm2, %ymm2
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
index d821b370f78a699..f77719f01a857a2 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
@@ -2831,15 +2831,15 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,0]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm9, %zmm2
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm9
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm10
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm10
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm9
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm10, %ymm4
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,1,1,1]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm9, %ymm4
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,1,1]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512F-SLOW-NEXT:    vpandnq %ymm10, %ymm21, %ymm10
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm10, %zmm10
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm10
+; AVX512F-SLOW-NEXT:    vpandnq %ymm9, %ymm21, %ymm9
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm9, %zmm9
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm9
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm2
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm12[0],xmm2[0],xmm12[1],xmm2[1],xmm12[2],xmm2[2],xmm12[3],xmm2[3]
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm4, %xmm4
@@ -2860,7 +2860,7 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm7, %zmm4
 ; AVX512F-SLOW-NEXT:    vpbroadcastq (%r8), %ymm2
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm9[0,1,1,1]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm10[0,1,1,1]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm2, %zmm2
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm2
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm18[0,1,2,1,4,5,6,5]
@@ -2909,16 +2909,15 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm16, %zmm1
-; AVX512F-SLOW-NEXT:    vpbroadcastq 16(%r8), %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpandn %ymm0, %ymm3, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm9, %ymm3
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm10, %ymm0
+; AVX512F-SLOW-NEXT:    vpbroadcastq 16(%r8), %ymm3
+; AVX512F-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
 ; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, 64(%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, 256(%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, (%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm10, 192(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm9, 192(%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm19, 128(%r9)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
@@ -3019,11 +3018,10 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm13, %zmm7
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
 ; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm20, %zmm7
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm24, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vpbroadcastq 16(%r8), %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpandn %ymm3, %ymm13, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm24, %ymm11
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm0
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29]

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
index 473ac8a546f9048..4d9c5a89b082b2e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
@@ -2522,8 +2522,7 @@ define void @store_i16_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm12
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 8(%rax), %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpandnq %ymm10, %ymm20, %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm10, %ymm10
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm8[0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
@@ -2789,8 +2788,7 @@ define void @store_i16_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm12
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    vpbroadcastd 8(%rax), %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpandnq %ymm10, %ymm20, %ymm10
+; AVX512DQ-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm10, %ymm10
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm14 = xmm8[0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[0,1,2,3,6,7,4,5,6,7,4,5,12,13,14,15]
@@ -5252,330 +5250,336 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512F-ONLY-SLOW-LABEL: store_i16_stride7_vf32:
 ; AVX512F-ONLY-SLOW:       # %bb.0:
-; AVX512F-ONLY-SLOW-NEXT:    subq $648, %rsp # imm = 0x288
+; AVX512F-ONLY-SLOW-NEXT:    subq $632, %rsp # imm = 0x278
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rcx), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm1, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm4, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm30
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm4, %ymm2, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm9, %ymm5, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm31
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm9, %ymm1, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm2, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm29
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm12, %ymm1, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm1, %ymm15
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm2, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm17
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm10, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %ymm11
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm11, %ymm3
 ; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm2, %ymm3, %ymm2
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %ymm15
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm2, %ymm15, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm3, %ymm6, %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm5, %ymm7, %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r9), %xmm7
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r8), %xmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm4, %ymm5, %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm11
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm9, %ymm11, %ymm9
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm4, %ymm9, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r9), %xmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r8), %xmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm11[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm10[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,3,6,6,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7,8,9],ymm2[10],ymm4[11,12],ymm2[13],ymm4[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [4,5,4,5,4,5,6,7,16,17,16,17,16,17,17,19]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm2[0,1,3,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rcx), %xmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <16,18,19,19,19,19,u,u,0,1,0,1,2,3,2,3>
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm2, %zmm3, %zmm4
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm13
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm13, %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm14
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm14, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm12, %ymm6, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm14, %ymm3, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm9, %ymm12, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm13, %ymm7, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r9), %ymm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r8), %ymm14
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm1, %ymm13, %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm0, %ymm14, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r9), %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm2, %ymm4, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%r8), %ymm0
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm3, %ymm0, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm6[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm15[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,3,6,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [4,5,4,5,4,5,6,7,16,17,16,17,16,17,17,19]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm1[0,1,3,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm3, %zmm2, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm2, %xmm8, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm20
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <u,16,u,u,17,17,u,u,0,u,u,1,2,u,u,3>
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm2, %zmm1, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm4[0,1,2,3,4,5,7,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,3,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <0,1,0,1,0,1,1,3,16,18,19,19,19,19,u,u>
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm9
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rcx), %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,7,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm2, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <16,18,19,19,19,19,u,u,0,1,0,1,2,3,2,3>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm1, %zmm2, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdx), %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm7, %xmm9, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[1,1,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3],xmm1[4],xmm3[5,6],xmm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm2[0],xmm9[0],xmm2[1],xmm9[1],xmm2[2],xmm9[2],xmm2[3],xmm9[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,3,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <u,16,u,u,17,17,u,u,0,u,u,1,2,u,u,3>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm3, %zmm1, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r9), %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%r8), %xmm12
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm12[0],xmm3[0],xmm12[1],xmm3[1],xmm12[2],xmm3[2],xmm12[3],xmm3[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm1[0,1,2,3,4,5,7,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = <0,1,0,1,0,1,1,3,16,18,19,19,19,19,u,u>
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2d %zmm8, %zmm1, %zmm26
-; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %ymm15, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm6[1,2,2,3,5,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm8[0,1],ymm1[2],ymm8[3,4],ymm1[5],ymm8[6,7,8,9],ymm1[10],ymm8[11,12],ymm1[13],ymm8[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,0,2,1,4,4,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm15[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[0,0,0,0,4,4,4,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm8[0,1,2],ymm6[3],ymm8[4,5],ymm6[6],ymm8[7,8,9,10],ymm6[11],ymm8[12,13],ymm6[14],ymm8[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [2,2,3,3,10,9,11,10]
-; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm1, %zmm6, %zmm28
+; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %ymm10, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm11[1,2,2,3,5,6,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7,8,9],ymm4[10],ymm5[11,12],ymm4[13],ymm5[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm11[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm10[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[0,0,0,0,4,4,4,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm9[0,1,2],ymm5[3],ymm9[4,5],ymm5[6],ymm9[7,8,9,10],ymm5[11],ymm9[12,13],ymm5[14],ymm9[15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [2,2,3,3,10,9,11,10]
+; AVX512F-ONLY-SLOW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm27
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm9[4],xmm2[5],xmm9[5],xmm2[6],xmm9[6],xmm2[7],xmm9[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm25
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rax), %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 8(%rax), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpandn %ymm1, %ymm2, %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm6, %ymm8, %ymm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm12[4],xmm3[4],xmm12[5],xmm3[5],xmm12[6],xmm3[6],xmm12[7],xmm3[7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm5[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,2,1,4,4,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm11[1,1,1,1,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7,8,9],ymm3[10],ymm1[11,12],ymm3[13],ymm1[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm13[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[0,1,1,3,4,5,5,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7,8,9],ymm1[10],ymm3[11,12],ymm1[13],ymm3[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rax), %ymm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm1[0,1,1,3,4,5,5,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpandn %ymm3, %ymm8, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm6, %ymm1, %ymm6
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm6, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,0,0,0,4,4,4,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm0[0,0,2,1,4,4,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm6[3],ymm3[4,5],ymm6[6],ymm3[7,8,9,10],ymm6[11],ymm3[12,13],ymm6[14],ymm3[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm27
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
-; AVX512F-ONLY-SLOW-NEXT:    # ymm15 = mem[0,1,0,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm13, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm14[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7,8,9],ymm6[10],ymm3[11,12],ymm6[13],ymm3[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm22
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm13[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm6[1],ymm3[2,3],ymm6[4],ymm3[5,6,7,8],ymm6[9],ymm3[10,11],ymm6[12],ymm3[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm17
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm5[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm11[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm6[0],ymm3[1],ymm6[2,3],ymm3[4],ymm6[5,6,7,8],ymm3[9],ymm6[10,11],ymm3[12],ymm6[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm16
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm11[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm13 = ymm3[0,1,2],ymm5[3],ymm3[4,5],ymm5[6],ymm3[7,8,9,10],ymm5[11],ymm3[12,13],ymm5[14],ymm3[15]
-; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %ymm4, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm0[1,2,2,3,5,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm3[2],ymm5[3,4],ymm3[5],ymm5[6,7,8,9],ymm3[10],ymm5[11,12],ymm3[13],ymm5[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm24
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm4[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,2,2,3,6,6,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1],ymm0[2],ymm3[3,4],ymm0[5],ymm3[6,7,8,9],ymm0[10],ymm3[11,12],ymm0[13],ymm3[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm31
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rax), %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 8(%rax), %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm5, %ymm4, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm28
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm7[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm30
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm12[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm7[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm0 = [6,5,0,0,7,6,0,7,6,5,0,0,7,6,0,7]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermd %zmm1, %zmm0, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu (%rsp), %ymm11 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm11[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm6[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm30[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm14[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm6[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
 ; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm31[3,3,3,3,7,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8,9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %xmm1, %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[1,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm6[0,1],xmm3[2],xmm6[3,4],xmm3[5],xmm6[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm21
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm20
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm7, %xmm1, %xmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,1,2,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0],xmm0[1],xmm7[2,3],xmm0[4],xmm7[5,6],xmm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8,9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm19
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm18
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm12, %xmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm3, %xmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm10, %xmm7, %xmm10
-; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
-; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %xmm6, %xmm6
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm3[0,1],xmm6[2],xmm3[3,4],xmm6[5],xmm3[6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm11[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,0,0,0,4,4,4,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm30[0,1,1,3,4,5,5,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm3[2],ymm6[3,4],ymm3[5],ymm6[6,7,8,9],ymm3[10],ymm6[11,12],ymm3[13],ymm6[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm15, %ymm11, %ymm3
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm30[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm3[0,1],ymm12[2],ymm3[3,4],ymm12[5],ymm3[6,7,8,9],ymm12[10],ymm3[11,12],ymm12[13],ymm3[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm14[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,0,2,1,4,4,6,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm31[1,1,1,1,5,5,5,5]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm12[2],ymm3[3,4],ymm12[5],ymm3[6,7,8,9],ymm12[10],ymm3[11,12],ymm12[13],ymm3[14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm31[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[2,2,2,2,6,6,6,6]
-; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0],ymm14[1],ymm12[2,3],ymm14[4],ymm12[5,6,7,8],ymm14[9],ymm12[10,11],ymm14[12],ymm12[13,14,15]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm1
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm1[0,2,3,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[0,0,1,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm22[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm31 = ymm17[0,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm16[0,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,3,2,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,0,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm31, %zmm30, %zmm30
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm30, %zmm13, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,1,3]
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm9, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm8, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm5, %zmm13, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14, %zmm5 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm8 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm5, %zmm31, %zmm8
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm2[0,1,2,3],zmm1[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm2 = mem[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm5 = mem[2,1,3,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm27[2,2,3,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm24[2,1,3,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm23[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufd $254, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
-; AVX512F-ONLY-SLOW-NEXT:    # ymm17 = mem[2,3,3,3,6,7,7,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm21[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm9[2,1,2,3,4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,5,4]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,1,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm19[0,0,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm9
-; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,2,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %ymm13, %ymm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm14[1,2,2,3,5,6,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm13[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,3,6,6,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm14[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm4
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm4[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm29[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm18
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm15, %ymm8
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm15[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm17[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8,9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %xmm9, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm0[1,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm15[0,1],xmm1[2],xmm15[3,4],xmm1[5],xmm15[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm0, %xmm9, %xmm0
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm15
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm15[1,1,2,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm11[0],xmm0[1],xmm11[2,3],xmm0[4],xmm11[5,6],xmm0[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm26
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm15[0],xmm9[0],xmm15[1],xmm9[1],xmm15[2],xmm9[2],xmm15[3],xmm9[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm15[4],xmm9[4],xmm15[5],xmm9[5],xmm15[6],xmm9[6],xmm15[7],xmm9[7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm25
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[1,1,1,1,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm6[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm6[0,1],ymm3[2],ymm6[3,4],ymm3[5],ymm6[6,7,8,9],ymm3[10],ymm6[11,12],ymm3[13],ymm6[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm24
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm7[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm12[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,0,0,0,4,4,4,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7,8,9],ymm6[10],ymm3[11,12],ymm6[13],ymm3[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm22
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm13[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,0,0,0,4,4,4,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm6[0,1,2],ymm3[3],ymm6[4,5],ymm3[6],ymm6[7,8,9,10],ymm3[11],ymm6[12,13],ymm3[14],ymm6[15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm21
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [6,5,0,0,7,6,0,7,6,5,0,0,7,6,0,7]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 32(%rax), %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vpermd %zmm6, %zmm3, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm7
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %ymm7, %ymm6, %ymm11
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm6
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm11, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm7, %xmm2, %xmm11
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rdi), %xmm13
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa (%rsi), %xmm14
+; AVX512F-ONLY-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm13[0],xmm14[0],xmm13[1],xmm14[1],xmm13[2],xmm14[2],xmm13[3],xmm14[3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb %xmm7, %xmm2, %xmm2
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm20
+; AVX512F-ONLY-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm15 = xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
+; AVX512F-ONLY-SLOW-NEXT:    vprold $16, %xmm14, %xmm14
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[1,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm13[0,1],xmm14[2],xmm13[3,4],xmm14[5],xmm13[6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm16
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa %ymm4, %ymm2
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[0,0,0,0,4,4,4,4]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm12
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm29[0,1,1,3,4,5,5,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm9[0,1],ymm14[2],ymm9[3,4],ymm14[5],ymm9[6,7,8,9],ymm14[10],ymm9[11,12],ymm14[13],ymm9[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm4[0,2,3,3,4,5,6,7]
 ; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,3,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm15[2,1,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,5,5,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,0,1,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,1,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm30[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm23[0,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm19[2,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm18[0,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,2,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm31 = ymm5[2,1,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm12[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm8[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm17[1,1,1,1,5,5,5,5]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm17[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm8[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[2,2,2,2,6,6,6,6]
+; AVX512F-ONLY-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2,3],ymm12[4],ymm5[5,6,7,8],ymm12[9],ymm5[10,11],ymm12[12],ymm5[13,14,15]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm29, %zmm28, %zmm12
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm10, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $184, %zmm12, %zmm10, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm30, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm31, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm10, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm0 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm9 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm10, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm11[0,1,2,3],zmm0[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm11 = mem[2,1,3,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm12 = mem[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufd $254, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm15 = mem[2,3,3,3,6,7,7,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # ymm17 = mem[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; AVX512F-ONLY-SLOW-NEXT:    # xmm8 = mem[2,1,2,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,5,4]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,1,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm26[0,0,1,1]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm13
+; AVX512F-ONLY-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,2,3,3,4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm19 = ymm24[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm22[2,1,3,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm21[2,2,3,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm20[0,0,1,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm31 = ymm16[0,0,2,1]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,1,3,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,2,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm0
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm3
+; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm15[2,1,3,2]
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 32(%rax), %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm7
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm17, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm18, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm10, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 36(%rax), %ymm1
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 40(%rax), %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm1, %zmm1
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm1
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm8, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm8, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm8[0,1,2,3],zmm2[4,5,6,7]
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm2
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm14, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm29
-; AVX512F-ONLY-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm17[2,1,3,2]
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 32(%rax), %ymm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm0
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm22, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm30, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $226, %zmm4, %zmm31, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 36(%rax), %ymm4
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 40(%rax), %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm4, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm4
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm10, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd (%rax), %ymm7
-; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 4(%rax), %ymm8
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm26, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm7
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm6, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm3, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm3
-; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,5,4,0,0,6,5,0,0,5,4,0,0,6,5,0]
-; AVX512F-ONLY-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-SLOW-NEXT:    vpermd (%rax), %zmm5, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm28, %zmm5
-; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm5
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm8, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm28, %zmm9, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm29, %zmm0, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm10[0,1,2,3],zmm8[4,5,6,7]
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm6
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm31, %zmm30, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm8
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd (%rax), %ymm9
+; AVX512F-ONLY-SLOW-NEXT:    vpbroadcastd 4(%rax), %ymm10
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm9
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm14, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm4
+; AVX512F-ONLY-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,5,4,0,0,6,5,0,0,5,4,0,0,6,5,0]
+; AVX512F-ONLY-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-SLOW-NEXT:    vpermd (%rax), %zmm2, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm27, %zmm2
+; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm2
 ; AVX512F-ONLY-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm5, 128(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, (%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 320(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm4, 256(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 192(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 64(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm29, 384(%rax)
-; AVX512F-ONLY-SLOW-NEXT:    addq $648, %rsp # imm = 0x288
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm2, 128(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm6, 320(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm1, 256(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm7, 192(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm0, 64(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    vmovdqa64 %zmm3, 384(%rax)
+; AVX512F-ONLY-SLOW-NEXT:    addq $632, %rsp # imm = 0x278
 ; AVX512F-ONLY-SLOW-NEXT:    vzeroupper
 ; AVX512F-ONLY-SLOW-NEXT:    retq
 ;
@@ -5609,9 +5613,9 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm13
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm14
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm15, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm14, %ymm7
 ; AVX512F-ONLY-FAST-NEXT:    vporq %ymm6, %ymm7, %ymm25
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm4, %ymm10, %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm6
@@ -5625,9 +5629,8 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm12, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm1, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm21
@@ -5658,11 +5661,11 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm5, %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm30
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [2,1,3,3,8,8,9,9]
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm19
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm15[3,3,3,3,7,7,7,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[3,3,3,3,7,7,7,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7,8,9],ymm3[10],ymm2[11,12],ymm3[13],ymm2[14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [2,2,2,3,8,8,8,9]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %xmm3
@@ -5690,54 +5693,52 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [0,0,0,1,8,9,9,11]
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm3, %zmm27
 ; AVX512F-ONLY-FAST-NEXT:    vprold $16, %ymm13, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm15[1,2,2,3,5,6,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[1,2,2,3,5,6,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1],ymm0[2],ymm3[3,4],ymm0[5],ymm3[6,7,8,9],ymm0[10],ymm3[11,12],ymm0[13],ymm3[14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm5 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm13, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm15[0,0,2,1,4,4,6,5]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm14[0,0,2,1,4,4,6,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm7[3],ymm3[4,5],ymm7[6],ymm3[7,8,9,10],ymm7[11],ymm3[12,13],ymm7[14],ymm3[15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [2,2,3,3,10,9,11,10]
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm0, %zmm3, %zmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm8
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm15[4],xmm0[5],xmm15[5],xmm0[6],xmm15[6],xmm0[7],xmm15[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm17
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm8, %xmm18
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm30, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <0,0,1,1,12,13,u,15>
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm2, %zmm1, %zmm25
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd 8(%rax), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpandn %ymm1, %ymm2, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rax), %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm3, %ymm16
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm30
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm29, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[1,1,1,1,5,5,5,5]
-; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm1[0,1],ymm6[2],ymm1[3,4],ymm6[5],ymm1[6,7,8,9],ymm6[10],ymm1[11,12],ymm6[13],ymm1[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm6[2],ymm1[3,4],ymm6[5],ymm1[6,7,8,9],ymm6[10],ymm1[11,12],ymm6[13],ymm1[14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm16
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm31, %ymm13
 ; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm13[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm12[0,1,1,3,4,5,5,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm6[0,1],ymm1[2],ymm6[3,4],ymm1[5],ymm6[6,7,8,9],ymm1[10],ymm6[11,12],ymm1[13],ymm6[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm14, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm14, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm15, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm4[0,0,2,1,4,4,6,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3],ymm1[4,5],ymm5[6],ymm1[7,8,9,10],ymm5[11],ymm1[12,13],ymm5[14],ymm1[15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = <0,1,u,3,10,10,11,11>
 ; AVX512F-ONLY-FAST-NEXT:    vpermi2q %zmm1, %zmm21, %zmm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,4,u,u,u,5,u>
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rax), %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm6, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpandn %ymm1, %ymm5, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,4,u,u,u,5,u>
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm6, %ymm2, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm14
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm13[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm12[3,3,3,3,7,7,7,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2,3],ymm2[4],ymm5[5,6,7,8],ymm2[9],ymm5[10,11],ymm2[12],ymm5[13,14,15]
@@ -5748,21 +5749,21 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7,8,9],ymm12[10],ymm13[11,12],ymm12[13],ymm13[14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [2,2,2,3,8,10,10,11]
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm13
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm4[3,3,3,3,7,7,7,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm12[2],ymm2[3,4],ymm12[5],ymm2[6,7,8,9],ymm12[10],ymm2[11,12],ymm12[13],ymm2[14,15]
-; AVX512F-ONLY-FAST-NEXT:    vprold $16, %ymm3, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vprold $16, %ymm15, %ymm12
 ; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[1,2,2,3,5,6,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm4[0,1],ymm12[2],ymm4[3,4],ymm12[5],ymm4[6,7,8,9],ymm12[10],ymm4[11,12],ymm12[13],ymm4[14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [2,1,3,2,10,10,10,11]
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm31, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm22, %zmm17, %zmm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm22, %zmm18, %zmm13
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 (%rax), %zmm15
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [30,5,0,0,31,6,0,31,30,5,0,0,31,6,0,31]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm3, %zmm6, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpermi2d %zmm15, %zmm6, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm12 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
@@ -5777,15 +5778,14 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    # xmm6 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm24, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm1[0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm6, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm6
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,1,1,3,8,8,9,9]
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm6, %zmm22, %zmm13
 ; AVX512F-ONLY-FAST-NEXT:    vprold $16, %xmm0, %xmm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm15[1,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm8[1,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm6[2],xmm2[3,4],xmm6[5],xmm2[6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm11, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm6
@@ -5814,8 +5814,8 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm10[0,1],ymm5[2],ymm10[3,4],ymm5[5],ymm10[6,7,8,9],ymm5[10],ymm10[11,12],ymm5[13],ymm10[14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm12[0,2,3,3,4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,1,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm17[0,0,1,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm16[2,2,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,3,2]
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm0, %zmm31, %zmm5
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastd (%rax), %ymm0
@@ -5834,8 +5834,8 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm12
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm12
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <6,u,u,u,7,u,u,7>
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm16, %ymm2, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm3, %zmm3
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm3, %ymm2, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm15, %zmm3
 ; AVX512F-ONLY-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,13,4,0,0,14,5,0,0,13,4,0,0,14,5,0]
 ; AVX512F-ONLY-FAST-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpermd %zmm3, %zmm5, %zmm3
@@ -5844,7 +5844,7 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm5 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm11, %zmm9
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm23, %zmm17, %zmm19
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm23, %zmm18, %zmm19
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm25, %zmm30
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm30
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
@@ -5872,330 +5872,336 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ;
 ; AVX512DQ-SLOW-LABEL: store_i16_stride7_vf32:
 ; AVX512DQ-SLOW:       # %bb.0:
-; AVX512DQ-SLOW-NEXT:    subq $648, %rsp # imm = 0x288
+; AVX512DQ-SLOW-NEXT:    subq $632, %rsp # imm = 0x278
 ; AVX512DQ-SLOW-NEXT:    vmovdqa (%rcx), %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm1, %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %ymm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm1, %ymm4, %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm30
-; AVX512DQ-SLOW-NEXT:    vpor %ymm2, %ymm3, %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rsi), %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm4, %ymm2, %ymm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm9, %ymm5, %ymm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm31
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm9, %ymm1, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm16
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm13 = <u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u>
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm13, %ymm2, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm29
+; AVX512DQ-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm12 = [128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm12, %ymm1, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm1, %ymm15
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = <12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u,u,u,u,u,16,17,18,19>
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm2, %ymm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm17
+; AVX512DQ-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %ymm10
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm1, %ymm10, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %ymm11
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm11, %ymm3
 ; AVX512DQ-SLOW-NEXT:    vpor %ymm2, %ymm3, %ymm2
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %ymm15
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm2, %ymm15, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %ymm6
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm3, %ymm6, %ymm7
-; AVX512DQ-SLOW-NEXT:    vpor %ymm5, %ymm7, %ymm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r9), %xmm7
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r8), %xmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm5
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm4, %ymm5, %ymm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm11
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm9, %ymm11, %ymm9
-; AVX512DQ-SLOW-NEXT:    vpor %ymm4, %ymm9, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r9), %xmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r8), %xmm6
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm11[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm4 = ymm10[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,2,2,3,6,6,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm2[2],ymm4[3,4],ymm2[5],ymm4[6,7,8,9],ymm2[10],ymm4[11,12],ymm2[13],ymm4[14,15]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [4,5,4,5,4,5,6,7,16,17,16,17,16,17,17,19]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm2[0,1,3,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rcx), %xmm8
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,6]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <16,18,19,19,19,19,u,u,0,1,0,1,2,3,2,3>
+; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm2, %zmm3, %zmm4
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm13
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm13, %ymm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm14
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm1, %ymm14, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm6
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm12, %ymm6, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm14, %ymm3, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm12
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm7
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm9, %ymm12, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm13, %ymm7, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r9), %ymm13
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r8), %ymm14
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm1, %ymm13, %ymm1
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm0, %ymm14, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpor %ymm1, %ymm0, %ymm0
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r9), %ymm4
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm2, %ymm4, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%r8), %ymm0
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm3, %ymm0, %ymm2
-; AVX512DQ-SLOW-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm6[3,3,3,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm2 = ymm15[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm2[2,2,2,3,6,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm1[2],ymm2[3,4],ymm1[5],ymm2[6,7,8,9],ymm1[10],ymm2[11,12],ymm1[13],ymm2[14,15]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [4,5,4,5,4,5,6,7,16,17,16,17,16,17,17,19]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm1[0,1,3,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm3, %zmm2, %zmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %xmm0
+; AVX512DQ-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm2 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm2, %xmm8, %xmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm20
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,2]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm2[0,1,3,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = <u,16,u,u,17,17,u,u,0,u,u,1,2,u,u,3>
+; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm2, %zmm1, %zmm4
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %xmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %xmm2
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm4[0,1,2,3,4,5,7,6]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm4[0,1,3,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = <0,1,0,1,0,1,1,3,16,18,19,19,19,19,u,u>
+; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm5, %zmm4, %zmm9
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rcx), %xmm9
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,7,6]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm2, %xmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <16,18,19,19,19,19,u,u,0,1,0,1,2,3,2,3>
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm1, %zmm2, %zmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdx), %xmm2
-; AVX512DQ-SLOW-NEXT:    vpbroadcastq {{.*#+}} xmm7 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm7, %xmm9, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm2[1,1,2,2]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0],xmm1[1],xmm3[2,3],xmm1[4],xmm3[5,6],xmm1[7]
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm2[0],xmm9[0],xmm2[1],xmm9[1],xmm2[2],xmm9[2],xmm2[3],xmm9[3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,1,3,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <u,16,u,u,17,17,u,u,0,u,u,1,2,u,u,3>
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm3, %zmm1, %zmm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r9), %xmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%r8), %xmm12
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm12[0],xmm3[0],xmm12[1],xmm3[1],xmm12[2],xmm3[2],xmm12[3],xmm3[3]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm1[0,1,2,3,4,5,7,6]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm26 = <0,1,0,1,0,1,1,3,16,18,19,19,19,19,u,u>
-; AVX512DQ-SLOW-NEXT:    vpermi2d %zmm8, %zmm1, %zmm26
-; AVX512DQ-SLOW-NEXT:    vprold $16, %ymm15, %ymm1
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm6[1,2,2,3,5,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm8[0,1],ymm1[2],ymm8[3,4],ymm1[5],ymm8[6,7,8,9],ymm1[10],ymm8[11,12],ymm1[13],ymm8[14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,0,2,1,4,4,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm8 = ymm15[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[0,0,0,0,4,4,4,4]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm8[0,1,2],ymm6[3],ymm8[4,5],ymm6[6],ymm8[7,8,9,10],ymm6[11],ymm8[12,13],ymm6[14],ymm8[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [2,2,3,3,10,9,11,10]
-; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm1, %zmm6, %zmm28
+; AVX512DQ-SLOW-NEXT:    vprold $16, %ymm10, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm11[1,2,2,3,5,6,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm5[0,1],ymm4[2],ymm5[3,4],ymm4[5],ymm5[6,7,8,9],ymm4[10],ymm5[11,12],ymm4[13],ymm5[14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm11[0,0,2,1,4,4,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm9 = ymm10[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm9[0,0,0,0,4,4,4,4]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm9[0,1,2],ymm5[3],ymm9[4,5],ymm5[6],ymm9[7,8,9,10],ymm5[11],ymm9[12,13],ymm5[14],ymm9[15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [2,2,3,3,10,9,11,10]
+; AVX512DQ-SLOW-NEXT:    vpermi2q %zmm4, %zmm5, %zmm27
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm2[4],xmm9[4],xmm2[5],xmm9[5],xmm2[6],xmm9[6],xmm2[7],xmm9[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm25
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rax), %ymm8
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd 8(%rax), %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpandn %ymm1, %ymm2, %ymm1
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm6 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm6, %ymm8, %ymm2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm12 = xmm12[4],xmm3[4],xmm12[5],xmm3[5],xmm12[6],xmm3[6],xmm12[7],xmm3[7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm5[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,2,1,4,4,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm11[1,1,1,1,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7,8,9],ymm3[10],ymm1[11,12],ymm3[13],ymm1[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm13[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,0,0,0,4,4,4,4]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[0,1,1,3,4,5,5,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0,1],ymm1[2],ymm3[3,4],ymm1[5],ymm3[6,7,8,9],ymm1[10],ymm3[11,12],ymm1[13],ymm3[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rax), %ymm1
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm1[0,1,1,3,4,5,5,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpandn %ymm3, %ymm8, %ymm3
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm6, %ymm1, %ymm6
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm6, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,0,0,0,4,4,4,4]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm0[0,0,2,1,4,4,6,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm6[3],ymm3[4,5],ymm6[6],ymm3[7,8,9,10],ymm6[11],ymm3[12,13],ymm6[14],ymm3[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm27
-; AVX512DQ-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm15 = [22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27,22,23,26,27,0,0,24,25,26,27,0,0,26,27,26,27]
-; AVX512DQ-SLOW-NEXT:    # ymm15 = mem[0,1,0,1]
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm15, %ymm13, %ymm3
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm14[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7,8,9],ymm6[10],ymm3[11,12],ymm6[13],ymm3[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm22
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[3,3,3,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm6 = ymm13[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm3[0],ymm6[1],ymm3[2,3],ymm6[4],ymm3[5,6,7,8],ymm6[9],ymm3[10,11],ymm6[12],ymm3[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm17
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm5[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm11[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm6[0],ymm3[1],ymm6[2,3],ymm3[4],ymm6[5,6,7,8],ymm3[9],ymm6[10,11],ymm3[12],ymm6[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm16
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm11[3,3,3,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm13 = ymm3[0,1,2],ymm5[3],ymm3[4,5],ymm5[6],ymm3[7,8,9,10],ymm5[11],ymm3[12,13],ymm5[14],ymm3[15]
-; AVX512DQ-SLOW-NEXT:    vprold $16, %ymm4, %ymm3
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm0[1,2,2,3,5,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm3[2],ymm5[3,4],ymm3[5],ymm5[6,7,8,9],ymm3[10],ymm5[11,12],ymm3[13],ymm5[14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm24
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[3,3,3,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm4[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,2,2,3,6,6,6,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1],ymm0[2],ymm3[3,4],ymm0[5],ymm3[6,7,8,9],ymm0[10],ymm3[11,12],ymm0[13],ymm3[14,15]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm31
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rax), %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpbroadcastd 8(%rax), %ymm0
+; AVX512DQ-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm5 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm5, %ymm4, %ymm4
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm28
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm7[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm30
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm12[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm7[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm23
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x8 {{.*#+}} zmm0 = [6,5,0,0,7,6,0,7,6,5,0,0,7,6,0,7]
-; AVX512DQ-SLOW-NEXT:    # zmm0 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermd %zmm1, %zmm0, %zmm29
-; AVX512DQ-SLOW-NEXT:    vmovdqu (%rsp), %ymm11 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm11[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm6[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm30[3,3,3,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm9 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm14[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm10 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm6[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
 ; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm31[3,3,3,3,7,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8,9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm1
-; AVX512DQ-SLOW-NEXT:    vprold $16, %xmm1, %xmm3
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[1,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm6[0,1],xmm3[2],xmm6[3,4],xmm3[5],xmm6[6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm21
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm20
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm7, %xmm1, %xmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,1,2,2]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm7[0],xmm0[1],xmm7[2,3],xmm0[4],xmm7[5,6],xmm0[7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8,9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm19
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm18
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm12, %xmm2
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm3, %xmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512DQ-SLOW-NEXT:    vmovdqa (%rsi), %xmm6
-; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3]
-; AVX512DQ-SLOW-NEXT:    vpshufb %xmm10, %xmm7, %xmm10
-; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7]
-; AVX512DQ-SLOW-NEXT:    vprold $16, %xmm6, %xmm6
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm7 = xmm3[0,1],xmm6[2],xmm3[3,4],xmm6[5],xmm3[6,7]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm11[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,0,0,0,4,4,4,4]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm30[0,1,1,3,4,5,5,7]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm3[2],ymm6[3,4],ymm3[5],ymm6[6,7,8,9],ymm3[10],ymm6[11,12],ymm3[13],ymm6[14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufb %ymm15, %ymm11, %ymm3
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm30[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm15 = ymm3[0,1],ymm12[2],ymm3[3,4],ymm12[5],ymm3[6,7,8,9],ymm12[10],ymm3[11,12],ymm12[13],ymm3[14,15]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm3 = ymm14[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[0,0,2,1,4,4,6,5]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm31[1,1,1,1,5,5,5,5]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm12[2],ymm3[3,4],ymm12[5],ymm3[6,7,8,9],ymm12[10],ymm3[11,12],ymm12[13],ymm3[14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm31[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm14 = ymm14[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[2,2,2,2,6,6,6,6]
-; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0],ymm14[1],ymm12[2,3],ymm14[4],ymm12[5,6,7,8],ymm14[9],ymm12[10,11],ymm14[12],ymm12[13,14,15]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm1
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm1[0,2,3,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,4]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm0[0,0,1,3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,0,1,1]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm22[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm31 = ymm17[0,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm16[0,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm5[0,1,3,2,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[0,0,1,1]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,1,3,3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm31, %zmm30, %zmm30
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm13 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm30, %zmm13, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,1,3]
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm9, %zmm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm8, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm5, %zmm13, %zmm4
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm14, %zmm5 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm8 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm5, %zmm31, %zmm8
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm1 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm2[0,1,2,3],zmm1[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm2 = mem[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm5 = mem[2,1,3,2]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm27[2,2,3,3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm24[2,1,3,2]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm23[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpshufd $254, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
-; AVX512DQ-SLOW-NEXT:    # ymm17 = mem[2,3,3,3,6,7,7,7]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm21[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm9
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm9[2,1,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm11 = xmm11[0,1,2,3,4,5,5,4]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,1,3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm19[0,0,1,1]
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm9
-; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm9[0,2,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vprold $16, %ymm13, %ymm0
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm14[1,2,2,3,5,6,6,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm13[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,3,6,6,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm14[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm4
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm4[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm29[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6,7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm18
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm15, %ymm8
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm15[0,1,2,3,7,6,6,7,8,9,10,11,15,14,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm17[3,3,3,3,7,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm1[0,1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8,9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm9
+; AVX512DQ-SLOW-NEXT:    vprold $16, %xmm9, %xmm1
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm0[1,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm15[0,1],xmm1[2],xmm15[3,4],xmm1[5],xmm15[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm9[0],xmm0[1],xmm9[1],xmm0[2],xmm9[2],xmm0[3],xmm9[3]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm0
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm0, %xmm9, %xmm0
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm15
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm11 = xmm15[1,1,2,2]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm11[0],xmm0[1],xmm11[2,3],xmm0[4],xmm11[5,6],xmm0[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm26
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm15[0],xmm9[0],xmm15[1],xmm9[1],xmm15[2],xmm9[2],xmm15[3],xmm9[3]
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm9 = xmm15[4],xmm9[4],xmm15[5],xmm9[5],xmm15[6],xmm9[6],xmm15[7],xmm9[7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm9, %xmm25
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[1,1,1,1,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm6[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,0,2,1,4,4,6,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm6[0,1],ymm3[2],ymm6[3,4],ymm3[5],ymm6[6,7,8,9],ymm3[10],ymm6[11,12],ymm3[13],ymm6[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm24
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm7[0,1,1,3,4,5,5,7]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm12[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,0,0,0,4,4,4,4]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7,8,9],ymm6[10],ymm3[11,12],ymm6[13],ymm3[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm22
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[0,0,2,1,4,4,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm6 = ymm13[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,0,0,0,4,4,4,4]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm6[0,1,2],ymm3[3],ymm6[4,5],ymm3[6],ymm6[7,8,9,10],ymm3[11],ymm6[12,13],ymm3[14],ymm6[15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm21
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x8 {{.*#+}} zmm3 = [6,5,0,0,7,6,0,7,6,5,0,0,7,6,0,7]
+; AVX512DQ-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa 32(%rax), %ymm6
+; AVX512DQ-SLOW-NEXT:    vpermd %zmm6, %zmm3, %zmm3
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm7
+; AVX512DQ-SLOW-NEXT:    vpshufb %ymm7, %ymm6, %ymm11
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,1,1,3,4,5,5,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm6
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm11, %zmm6
+; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm7, %xmm2, %xmm11
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rdi), %xmm13
+; AVX512DQ-SLOW-NEXT:    vmovdqa (%rsi), %xmm14
+; AVX512DQ-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm13[0],xmm14[0],xmm13[1],xmm14[1],xmm13[2],xmm14[2],xmm13[3],xmm14[3]
+; AVX512DQ-SLOW-NEXT:    vpshufb %xmm7, %xmm2, %xmm2
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm20
+; AVX512DQ-SLOW-NEXT:    vpunpckhwd {{.*#+}} xmm15 = xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
+; AVX512DQ-SLOW-NEXT:    vprold $16, %xmm14, %xmm14
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm13 = xmm13[1,1,2,3]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} xmm2 = xmm13[0,1],xmm14[2],xmm13[3,4],xmm14[5],xmm13[6,7]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm16
+; AVX512DQ-SLOW-NEXT:    vmovdqa %ymm4, %ymm2
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm4[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[0,0,0,0,4,4,4,4]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm12
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm9 = ymm29[0,1,1,3,4,5,5,7]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm14 = ymm9[0,1],ymm14[2],ymm9[3,4],ymm14[5],ymm9[6,7,8,9],ymm14[10],ymm9[11,12],ymm14[13],ymm9[14,15]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm4
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm4[0,2,3,3,4,5,6,7]
 ; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,0,2,1]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,1,3,2]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[0,2,2,3]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm18 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm15 = xmm15[2,1,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,4,5,5,4]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[0,0,1,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,1,1]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm30[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm23[0,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm19[2,1,3,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm18[0,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[0,1,3,2,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm31 = ymm5[2,1,3,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1]
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,u,u,24,25,26,27,u,u,26,27,26,27]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm12[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm8[1,2,3,3,4,5,6,7,9,10,11,11,12,13,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[0,0,2,1,4,4,6,5]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm17[1,1,1,1,5,5,5,5]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7,8,9],ymm5[10],ymm4[11,12],ymm5[13],ymm4[14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm17[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm8[0,1,2,3,5,4,6,7,8,9,10,11,13,12,14,15]
+; AVX512DQ-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm12[2,2,2,2,6,6,6,6]
+; AVX512DQ-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2,3],ymm12[4],ymm5[5,6,7,8],ymm12[9],ymm5[10,11],ymm12[12],ymm5[13,14,15]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm29, %zmm28, %zmm12
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm10, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $184, %zmm12, %zmm10, %zmm7
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3]
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm30, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm31, %zmm1
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm10, %zmm1
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm0 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm9 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm10, %zmm9
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm11[0,1,2,3],zmm0[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq $182, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm11 = mem[2,1,3,2]
+; AVX512DQ-SLOW-NEXT:    vpermq $234, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm12 = mem[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpshufd $254, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm15 = mem[2,3,3,3,6,7,7,7]
+; AVX512DQ-SLOW-NEXT:    vpermq $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # ymm17 = mem[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vpshuflw $230, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
+; AVX512DQ-SLOW-NEXT:    # xmm8 = mem[2,1,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,5,4]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,0,1,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm26[0,0,1,1]
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm13
+; AVX512DQ-SLOW-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm13[0,2,3,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm19 = ymm24[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm28 = ymm22[2,1,3,2]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm29 = ymm21[2,2,3,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm30 = ymm20[0,0,1,1]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm31 = ymm16[0,0,2,1]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,1,3,2]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,2,2,3]
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm16 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm16, %zmm0
+; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm0
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm11, %zmm9
+; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm9
+; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm3
+; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm15[2,1,3,2]
+; AVX512DQ-SLOW-NEXT:    vpbroadcastd 32(%rax), %ymm9
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm7
+; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm7
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm17, %zmm1
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm18, %zmm8
+; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm1, %zmm10, %zmm8
+; AVX512DQ-SLOW-NEXT:    vpbroadcastd 36(%rax), %ymm1
+; AVX512DQ-SLOW-NEXT:    vpbroadcastd 40(%rax), %ymm9
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm1, %zmm1
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm1
 ; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm8, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm8, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm2
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm8[0,1,2,3],zmm2[4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm2
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm2
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm16, %zmm14, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm29
-; AVX512DQ-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm17[2,1,3,2]
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd 32(%rax), %ymm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm0
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm0
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm22, %zmm4
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm30, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $226, %zmm4, %zmm31, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd 36(%rax), %ymm4
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd 40(%rax), %ymm8
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm4, %zmm4
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm4
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm4
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm10, %zmm5
-; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512DQ-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd (%rax), %ymm7
-; AVX512DQ-SLOW-NEXT:    vpbroadcastd 4(%rax), %ymm8
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm26, %zmm7
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm7
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm15, %zmm6, %zmm5
-; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm3, %zmm3
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm3
-; AVX512DQ-SLOW-NEXT:    vbroadcasti32x8 {{.*#+}} zmm5 = [0,5,4,0,0,6,5,0,0,5,4,0,0,6,5,0]
-; AVX512DQ-SLOW-NEXT:    # zmm5 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
-; AVX512DQ-SLOW-NEXT:    vpermd (%rax), %zmm5, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm28, %zmm5
-; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm5
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm19, %zmm8, %zmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm28, %zmm9, %zmm9
+; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm9
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm29, %zmm0, %zmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm10[0,1,2,3],zmm8[4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm6
+; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm6
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm31, %zmm30, %zmm8
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm9 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm8
+; AVX512DQ-SLOW-NEXT:    vpbroadcastd (%rax), %ymm9
+; AVX512DQ-SLOW-NEXT:    vpbroadcastd 4(%rax), %ymm10
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512DQ-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm9
+; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm9
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm14, %zmm2
+; AVX512DQ-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm4
+; AVX512DQ-SLOW-NEXT:    vbroadcasti32x8 {{.*#+}} zmm2 = [0,5,4,0,0,6,5,0,0,5,4,0,0,6,5,0]
+; AVX512DQ-SLOW-NEXT:    # zmm2 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
+; AVX512DQ-SLOW-NEXT:    vpermd (%rax), %zmm2, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm27, %zmm2
+; AVX512DQ-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm2
 ; AVX512DQ-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm5, 128(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, (%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, 320(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm4, 256(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, 192(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, 64(%rax)
-; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm29, 384(%rax)
-; AVX512DQ-SLOW-NEXT:    addq $648, %rsp # imm = 0x288
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm2, 128(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm6, 320(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm1, 256(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm7, 192(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm0, 64(%rax)
+; AVX512DQ-SLOW-NEXT:    vmovdqa64 %zmm3, 384(%rax)
+; AVX512DQ-SLOW-NEXT:    addq $632, %rsp # imm = 0x278
 ; AVX512DQ-SLOW-NEXT:    vzeroupper
 ; AVX512DQ-SLOW-NEXT:    retq
 ;
@@ -6229,9 +6235,9 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm13
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128,128,128]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm6
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm15
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm14
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,u,u,u,u,u,u,12,13,14,15,128,128,u,u,u,u,u,u,u,u,u,u,16,17,128,128,u,u,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm15, %ymm7
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm14, %ymm7
 ; AVX512DQ-FAST-NEXT:    vporq %ymm6, %ymm7, %ymm25
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm4, %ymm10, %ymm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm6
@@ -6245,9 +6251,8 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm12, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm1, %ymm14
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %ymm15
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %ymm4
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm4, %ymm1
 ; AVX512DQ-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm21
@@ -6278,11 +6283,11 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %xmm1
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm5, %xmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm30
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [2,1,3,3,8,8,9,9]
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm19
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm15[3,3,3,3,7,7,7,7]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[3,3,3,3,7,7,7,7]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7,8,9],ymm3[10],ymm2[11,12],ymm3[13],ymm2[14,15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [2,2,2,3,8,8,8,9]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %xmm3
@@ -6310,54 +6315,52 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm27 = [0,0,0,1,8,9,9,11]
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm3, %zmm27
 ; AVX512DQ-FAST-NEXT:    vprold $16, %ymm13, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm15[1,2,2,3,5,6,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[1,2,2,3,5,6,6,7]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm3[0,1],ymm0[2],ymm3[3,4],ymm0[5],ymm3[6,7,8,9],ymm0[10],ymm3[11,12],ymm0[13],ymm3[14,15]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastd {{.*#+}} ymm5 = [18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21,18,19,20,21]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm13, %ymm3
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm15[0,0,2,1,4,4,6,5]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm14[0,0,2,1,4,4,6,5]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0,1,2],ymm7[3],ymm3[4,5],ymm7[6],ymm3[7,8,9,10],ymm7[11],ymm3[12,13],ymm7[14],ymm3[15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [2,2,3,3,10,9,11,10]
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm0, %zmm3, %zmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm15
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm8
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm15[4],xmm0[5],xmm15[5],xmm0[6],xmm15[6],xmm0[7],xmm15[7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm9
+; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm17
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm25, %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm8, %xmm18
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm30, %xmm9
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <0,0,1,1,12,13,u,15>
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm2, %zmm1, %zmm25
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FAST-NEXT:    vpbroadcastd 8(%rax), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpandn %ymm1, %ymm2, %ymm1
+; AVX512DQ-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rax), %ymm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [12,13,128,128,128,128,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,128,128,128,128,16,17,128,128]
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm3, %ymm7
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm3, %ymm16
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm30
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21,18,19,20,21,24,25,26,27,22,23,22,23]
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[1,1,1,1,5,5,5,5]
-; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm8 = ymm1[0,1],ymm6[2],ymm1[3,4],ymm6[5],ymm1[6,7,8,9],ymm6[10],ymm1[11,12],ymm6[13],ymm1[14,15]
+; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm6[2],ymm1[3,4],ymm6[5],ymm1[6,7,8,9],ymm6[10],ymm1[11,12],ymm6[13],ymm1[14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm16
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm13
 ; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm13[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm6 = ymm12[0,1,1,3,4,5,5,7]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm7 = ymm6[0,1],ymm1[2],ymm6[3,4],ymm1[5],ymm6[6,7,8,9],ymm1[10],ymm6[11,12],ymm1[13],ymm6[14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm14, %ymm3
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm14, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm15, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm4[0,0,2,1,4,4,6,5]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3],ymm1[4,5],ymm5[6],ymm1[7,8,9,10],ymm5[11],ymm1[12,13],ymm5[14],ymm1[15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = <0,1,u,3,10,10,11,11>
 ; AVX512DQ-FAST-NEXT:    vpermi2q %zmm1, %zmm21, %zmm29
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,4,u,u,u,5,u>
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rax), %ymm6
-; AVX512DQ-FAST-NEXT:    vpermd %ymm6, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpandn %ymm1, %ymm5, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm14
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm6, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,4,u,u,u,5,u>
+; AVX512DQ-FAST-NEXT:    vpermd %ymm6, %ymm2, %ymm2
+; AVX512DQ-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm14
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm13[14,15,12,13,u,u,u,u,u,u,u,u,u,u,u,u,30,31,28,29,u,u,u,u,30,31,28,29,u,u,u,u]
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm12[3,3,3,3,7,7,7,7]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2,3],ymm2[4],ymm5[5,6,7,8],ymm2[9],ymm5[10,11],ymm2[12],ymm5[13,14,15]
@@ -6368,21 +6371,21 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0,1],ymm12[2],ymm13[3,4],ymm12[5],ymm13[6,7,8,9],ymm12[10],ymm13[11,12],ymm12[13],ymm13[14,15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [2,2,2,3,8,10,10,11]
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm13
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,26,27,28,29,26,27,28,29,30,31,30,31]
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm4[3,3,3,3,7,7,7,7]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm12[2],ymm2[3,4],ymm12[5],ymm2[6,7,8,9],ymm12[10],ymm2[11,12],ymm12[13],ymm2[14,15]
-; AVX512DQ-FAST-NEXT:    vprold $16, %ymm3, %ymm12
+; AVX512DQ-FAST-NEXT:    vprold $16, %ymm15, %ymm12
 ; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[1,2,2,3,5,6,6,7]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm12 = ymm4[0,1],ymm12[2],ymm4[3,4],ymm12[5],ymm4[6,7,8,9],ymm12[10],ymm4[11,12],ymm12[13],ymm4[14,15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [2,1,3,2,10,10,10,11]
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm31, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm22, %zmm17, %zmm13
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535,0,0,65535,65535,65535,65535,65535]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm22, %zmm18, %zmm13
 ; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rax), %zmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 (%rax), %zmm15
 ; AVX512DQ-FAST-NEXT:    vbroadcasti32x8 {{.*#+}} zmm4 = [30,5,0,0,31,6,0,31,30,5,0,0,31,6,0,31]
 ; AVX512DQ-FAST-NEXT:    # zmm4 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermi2d %zmm3, %zmm6, %zmm4
+; AVX512DQ-FAST-NEXT:    vpermi2d %zmm15, %zmm6, %zmm4
 ; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm4
 ; AVX512DQ-FAST-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7]
 ; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} xmm12 = [6,7,4,5,0,0,8,9,6,7,4,5,0,0,8,9]
@@ -6397,15 +6400,14 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    # xmm6 = xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm24, %xmm1
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm13 = xmm1[0,1,2,3,8,9,10,11,14,15,12,13,14,15,12,13]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm18, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm6, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm6
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [0,1,1,3,8,8,9,9]
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm6, %zmm22, %zmm13
 ; AVX512DQ-FAST-NEXT:    vprold $16, %xmm0, %xmm6
-; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm15[1,1,2,3]
+; AVX512DQ-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm8[1,1,2,3]
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm6[2],xmm2[3,4],xmm6[5],xmm2[6,7]
-; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm11, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %xmm2
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm6
@@ -6434,8 +6436,8 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpblendw {{.*#+}} ymm5 = ymm10[0,1],ymm5[2],ymm10[3,4],ymm5[5],ymm10[6,7,8,9],ymm5[10],ymm10[11,12],ymm5[13],ymm10[14,15]
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm12[0,2,3,3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,2,1]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,0,1,3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,2,3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm17[0,0,1,3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm16[2,2,2,3]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,1,3,2]
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm0, %zmm31, %zmm5
 ; AVX512DQ-FAST-NEXT:    vpbroadcastd (%rax), %ymm0
@@ -6454,8 +6456,8 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpermt2q %zmm2, %zmm21, %zmm12
 ; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm12
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <6,u,u,u,7,u,u,7>
-; AVX512DQ-FAST-NEXT:    vpermd %ymm16, %ymm2, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm3, %zmm3
+; AVX512DQ-FAST-NEXT:    vpermd %ymm3, %ymm2, %ymm2
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm15, %zmm3
 ; AVX512DQ-FAST-NEXT:    vbroadcasti32x8 {{.*#+}} zmm5 = [0,13,4,0,0,14,5,0,0,13,4,0,0,14,5,0]
 ; AVX512DQ-FAST-NEXT:    # zmm5 = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vpermd %zmm3, %zmm5, %zmm3
@@ -6464,7 +6466,7 @@ define void @store_i16_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm5 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm9, %zmm9 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm11, %zmm9
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm23, %zmm17, %zmm19
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm23, %zmm18, %zmm19
 ; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm25, %zmm30
 ; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm30
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Reload
@@ -11706,7 +11708,7 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%r8), %ymm12
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm12[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[14,15],zero,zero,ymm12[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[16,17],zero,zero,ymm12[u,u],zero,zero
+; AVX512F-ONLY-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm12[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[16,17,u,u,u,u],zero,zero
 ; AVX512F-ONLY-SLOW-NEXT:    vpternlogq $248, %ymm9, %ymm7, %ymm6
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqa 96(%r9), %ymm15
 ; AVX512F-ONLY-SLOW-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -12365,7 +12367,7 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm9
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r8), %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm8[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[14,15],zero,zero,ymm8[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[16,17],zero,zero,ymm8[u,u],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm8[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[16,17,u,u,u,u],zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm13, %ymm9, %ymm12
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm13, %ymm14
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 96(%r9), %ymm13
@@ -13029,7 +13031,7 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%r8), %ymm12
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm12[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[14,15],zero,zero,ymm12[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[16,17],zero,zero,ymm12[u,u],zero,zero
+; AVX512DQ-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm12[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm12[16,17,u,u,u,u],zero,zero
 ; AVX512DQ-SLOW-NEXT:    vpternlogq $248, %ymm9, %ymm7, %ymm6
 ; AVX512DQ-SLOW-NEXT:    vmovdqa 96(%r9), %ymm15
 ; AVX512DQ-SLOW-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -13688,7 +13690,7 @@ define void @store_i16_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm9
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r8), %ymm8
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm8[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[14,15],zero,zero,ymm8[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[16,17],zero,zero,ymm8[u,u],zero,zero
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm8[u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[14,15,u,u,u,u],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[16,17,u,u,u,u],zero,zero
 ; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm13, %ymm9, %ymm12
 ; AVX512DQ-FAST-NEXT:    vmovdqa %ymm13, %ymm14
 ; AVX512DQ-FAST-NEXT:    vmovdqa 96(%r9), %ymm13

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
index 1f21eee473194bd..270bcc23b715820 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
@@ -3967,8 +3967,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm0, %ymm1
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,1,4,6,5,5]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,3,2]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm14 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
-; AVX512F-SLOW-NEXT:    vpandn %ymm0, %ymm14, %ymm0
+; AVX512F-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm25
 ; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm9, %ymm9
@@ -4064,63 +4063,63 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512F-FAST-LABEL: store_i8_stride5_vf64:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm6
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm7
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm6, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = <12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19>
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm7, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = <12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19>
+; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm3, %ymm1
 ; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm1
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm1, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm18
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm2
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128>
-; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm1
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm2, %xmm1
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm25
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm3, %xmm17
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm17
 ; AVX512F-FAST-NEXT:    vpor %xmm0, %xmm1, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm9
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm3
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm2
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm8
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u>
 ; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm8, %ymm4
-; AVX512F-FAST-NEXT:    vpor %ymm3, %ymm4, %ymm3
-; AVX512F-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-FAST-NEXT:    vpor %ymm2, %ymm4, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm10
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12>
-; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm10, %xmm3
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm10, %xmm2
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm26
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm11
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm4
-; AVX512F-FAST-NEXT:    vporq %xmm3, %xmm4, %xmm19
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm6[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm6[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm22
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[26],zero,ymm2[28],zero,zero,ymm2[27],zero,ymm2[29],zero,ymm2[31],zero,zero,ymm2[30],zero
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero,ymm2[25],zero,zero
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm23
+; AVX512F-FAST-NEXT:    vporq %xmm2, %xmm4, %xmm21
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm7[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm7[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm22
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[26],zero,ymm3[28],zero,zero,ymm3[27],zero,ymm3[29],zero,ymm3[31],zero,zero,ymm3[30],zero
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero,ymm3[25],zero,zero
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm23
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm8[27],zero,zero,ymm8[26],zero,ymm8[28],zero,ymm8[30],zero,zero,ymm8[29],zero,ymm8[31],zero,zero
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
 ; AVX512F-FAST-NEXT:    # ymm4 = mem[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm9, %ymm3
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm4, %ymm30
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm24
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm12
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm12, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm6
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
-; AVX512F-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm20
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm7
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm12
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm12, %ymm1
+; AVX512F-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm19
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm5
 ; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm5, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm4
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm4, %ymm1
-; AVX512F-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm21
+; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm4, %ymm1
+; AVX512F-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm20
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm1
 ; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm1, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm16
@@ -4128,9 +4127,9 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm25, %xmm1
 ; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm2
 ; AVX512F-FAST-NEXT:    vporq %xmm0, %xmm2, %xmm28
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm7
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm6
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm26, %xmm0
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm7, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm6, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm2
 ; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm2, %xmm14
 ; AVX512F-FAST-NEXT:    vporq %xmm0, %xmm14, %xmm29
@@ -4142,26 +4141,25 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm15
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm14, %zmm27
-; AVX512F-FAST-NEXT:    vmovdqa64 (%r8), %zmm25
+; AVX512F-FAST-NEXT:    vmovdqa64 (%r8), %zmm26
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = <4,u,5,5,5,5,u,6,30,30,30,u,31,31,31,31>
-; AVX512F-FAST-NEXT:    vpermi2d %zmm25, %zmm0, %zmm31
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <4,u,5,5,5,5,u,6>
+; AVX512F-FAST-NEXT:    vpermi2d %zmm26, %zmm0, %zmm31
 ; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm0
-; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm15, %ymm15
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm26 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255]
-; AVX512F-FAST-NEXT:    vpandnq %ymm15, %ymm26, %ymm15
 ; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm1, %zmm26
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm15 = <4,u,5,5,5,5,u,6>
+; AVX512F-FAST-NEXT:    vpermd %ymm0, %ymm15, %ymm15
+; AVX512F-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm15
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm1, %zmm25
 ; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm9, %ymm9
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm30, %ymm13
-; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm12, %ymm15
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm12, %ymm1
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
-; AVX512F-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm8, %ymm8
-; AVX512F-FAST-NEXT:    vpshufb %ymm12, %ymm6, %ymm12
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,ymm6[26],zero,ymm6[28],zero,ymm6[30],zero,zero,ymm6[29],zero,ymm6[31],zero,zero
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm7, %ymm15
+; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm1
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
+; AVX512F-FAST-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm8, %ymm8
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm12, %ymm7
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm12[27],zero,zero,ymm12[26],zero,ymm12[28],zero,ymm12[30],zero,zero,ymm12[29],zero,ymm12[31],zero,zero
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3],xmm10[4],xmm11[4],xmm10[5],xmm11[5],xmm10[6],xmm11[6],xmm10[7],xmm11[7]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm30 = ymm9[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm5[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
@@ -4173,11 +4171,11 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm14 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm16, %xmm13
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm13[0],xmm3[0],xmm13[1],xmm3[1],xmm13[2],xmm3[2],xmm13[3],xmm3[3],xmm13[4],xmm3[4],xmm13[5],xmm3[5],xmm13[6],xmm3[6],xmm13[7],xmm3[7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
@@ -4190,42 +4188,42 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[0,0,1,1]
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm28, %zmm3, %zmm3
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3],xmm7[4],xmm2[4],xmm7[5],xmm2[5],xmm7[6],xmm2[6],xmm7[7],xmm2[7]
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
 ; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm2, %xmm2
 ; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm29, %zmm2, %zmm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm25, %zmm0
-; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm7 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm7 = mem[0,0,1,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm7 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm19[0,0,1,1]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm26, %zmm0
+; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm6 = mem[0,0,1,1]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6, %zmm6 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm21[0,0,1,1]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm13 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm16, %zmm13
-; AVX512F-FAST-NEXT:    vpor %ymm15, %ymm12, %ymm7
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm20, %zmm7
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm12 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm12, %ymm11, %ymm9
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm21, %zmm9
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm16, %zmm9
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm7 = zmm22[2,2,3,3,6,6,7,7]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm16, %zmm13
+; AVX512F-FAST-NEXT:    vpor %ymm7, %ymm15, %ymm6
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm19, %zmm6
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400]
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm7, %ymm11, %ymm9
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm20, %zmm9
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm16, %zmm9
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm6 = zmm22[2,2,3,3,6,6,7,7]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm11 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm11
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm12, %ymm1, %ymm6
-; AVX512F-FAST-NEXT:    vpandq %ymm12, %ymm30, %ymm1
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm11
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm7, %ymm1, %ymm12
+; AVX512F-FAST-NEXT:    vpandq %ymm7, %ymm30, %ymm1
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm7 = zmm24[2,2,3,3,6,6,7,7]
-; AVX512F-FAST-NEXT:    vporq %zmm7, %zmm1, %zmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm11, %zmm7, %zmm1
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm6, %zmm6
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm6 = zmm24[2,2,3,3,6,6,7,7]
+; AVX512F-FAST-NEXT:    vporq %zmm6, %zmm1, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm11, %zmm6, %zmm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm12, %zmm7
 ; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm4
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm4, %zmm4
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm7, %zmm4
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm7, %zmm6, %zmm4
 ; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm27
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm31
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm26
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm25
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <6,6,6,u,7,7,7,7,u,8,8,8,8,u,9,9>
-; AVX512F-FAST-NEXT:    vpermd %zmm25, %zmm1, %zmm1
+; AVX512F-FAST-NEXT:    vpermd %zmm26, %zmm1, %zmm1
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm1
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[0,0,1,1,4,4,5,5]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm2 = zmm2[0,0,1,1,4,4,5,5]
@@ -4233,7 +4231,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <u,0,0,0,0,u,1,1,9,9,u,10,10,10,10,u>
 ; AVX512F-FAST-NEXT:    vpermd %zmm0, %zmm3, %zmm0
 ; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm26, 64(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm25, 64(%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, (%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 128(%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm31, 256(%r9)

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
index 7893a799c207ab3..a7fffc339586896 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
@@ -7382,14 +7382,14 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512F-SLOW-LABEL: store_i8_stride7_vf64:
 ; AVX512F-SLOW:       # %bb.0:
-; AVX512F-SLOW-NEXT:    subq $1416, %rsp # imm = 0x588
+; AVX512F-SLOW-NEXT:    subq $1464, %rsp # imm = 0x5B8
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm1
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,zero,zero,ymm1[18]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, %ymm12
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm1, %ymm9
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[0,1,14],zero,ymm2[12,13,0,1,14,15],zero,ymm2[3,12,13,2,3,16],zero,ymm2[30,31,28,29,16,17],zero,ymm2[31,18,19,28,29,18],zero
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm2, %ymm9
+; AVX512F-SLOW-NEXT:    vmovdqa %ymm2, %ymm10
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -7400,44 +7400,46 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm8
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm8, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm28
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm17
 ; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,ymm0[17],zero,zero,zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm20
+; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0,13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0]
 ; AVX512F-SLOW-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm2, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm17
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm16
-; AVX512F-SLOW-NEXT:    vporq %ymm0, %ymm1, %ymm23
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %ymm10
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm11
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[27],zero,zero,zero,zero,ymm11[30],zero,ymm11[28],zero,zero,zero,zero,ymm11[31],zero,ymm11[29]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm10[25],zero,ymm10[23],zero,zero,zero,zero,ymm10[26],zero,ymm10[24],zero,zero
+; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm3, %ymm21
+; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %ymm13
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm14
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,zero,zero,ymm14[30],zero,ymm14[28],zero,zero,zero,zero,ymm14[31],zero,ymm14[29]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm14, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm13[25],zero,ymm13[23],zero,zero,zero,zero,ymm13[26],zero,ymm13[24],zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm6
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29],zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm6
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm11
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm11[30],zero,ymm11[28],zero,zero,zero,zero,ymm11[31],zero,ymm11[29],zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm11, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128]
 ; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm5, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm6, %ymm1
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm1, %ymm21
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %ymm5
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm4
 ; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128,27,128,25]
 ; AVX512F-SLOW-NEXT:    # ymm1 = mem[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm4, %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm20
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm19
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
@@ -7459,182 +7461,179 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm18
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm9, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm12[21],zero,ymm12[19],zero,zero,zero,zero,ymm12[22],zero,ymm12[20],zero,zero
+; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm10, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm9[21],zero,ymm9[19],zero,zero,zero,zero,ymm9[22],zero,ymm9[20],zero,zero
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %ymm3
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm25 = <u,5,4,u,5,u,4,u,20,21,u,23,u,21,u,23>
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm2 = ymm1[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512F-SLOW-NEXT:    vpermi2d %zmm0, %zmm2, %zmm25
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm3
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm3[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512F-SLOW-NEXT:    vpermi2d %zmm0, %zmm1, %zmm25
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm15
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm2, %xmm15, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm19
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm15, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm16
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm3, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm29
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm30
-; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm2, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm23
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm2, %xmm30
+; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm1, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm0, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm7
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm1, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm8, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm8, %xmm22
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm7, %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm7, %xmm22
 ; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = <0,u,0,u,2,3,u,1,u,18,u,19,18,u,19,u>
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = <0,u,0,u,2,3,u,1,u,18,u,19,18,u,19,u>
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rax), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, (%rsp) # 16-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5,5,6]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpermi2d %zmm0, %zmm2, %zmm8
-; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpermi2d %zmm0, %zmm2, %zmm7
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%r9), %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %xmm13
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm0, %xmm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm26
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm13, %xmm9
-; AVX512F-SLOW-NEXT:    vporq %xmm8, %xmm9, %xmm24
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %xmm2
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm7 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm0, %xmm9
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm31
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm12 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm2, %xmm10
+; AVX512F-SLOW-NEXT:    vporq %xmm9, %xmm10, %xmm24
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm5, %ymm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm0
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm9
-; AVX512F-SLOW-NEXT:    vpor %ymm8, %ymm9, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm6, %ymm26
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm11, %ymm10
+; AVX512F-SLOW-NEXT:    vpor %ymm9, %ymm10, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm3
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero,zero,zero,zero,zero,ymm3[18]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = ymm0[0,1,14],zero,ymm0[12,13,0,1,14,15],zero,ymm0[3,12,13,2,3,16],zero,ymm0[30,31,28,29,16,17],zero,ymm0[31,18,19,28,29,18],zero
-; AVX512F-SLOW-NEXT:    vpor %ymm8, %ymm9, %ymm5
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = zero,zero,zero,ymm5[14],zero,zero,zero,zero,zero,zero,ymm5[15],zero,zero,zero,zero,zero,zero,ymm5[16],zero,zero,zero,zero,zero,zero,ymm5[17],zero,zero,zero,zero,zero,zero,ymm5[18]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm27
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm19, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm0[0,1,14],zero,ymm0[12,13,0,1,14,15],zero,ymm0[3,12,13,2,3,16],zero,ymm0[30,31,28,29,16,17],zero,ymm0[31,18,19,28,29,18],zero
+; AVX512F-SLOW-NEXT:    vpor %ymm9, %ymm10, %ymm5
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,zero,zero,ymm11[14],zero,zero,zero,zero,zero,zero,ymm11[15],zero,zero,zero,zero,zero,zero,ymm11[16],zero,zero,zero,zero,zero,zero,ymm11[17],zero,zero,zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm6
-; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm10, %ymm9
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm10, %ymm28
-; AVX512F-SLOW-NEXT:    vpor %ymm8, %ymm9, %ymm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm20, %ymm9
+; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm14, %ymm9
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm21, %ymm6
+; AVX512F-SLOW-NEXT:    vpshufb %ymm6, %ymm13, %ymm10
+; AVX512F-SLOW-NEXT:    vpor %ymm9, %ymm10, %ymm5
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm6
-; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm6, %xmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm20
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm9
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm9, %xmm4
-; AVX512F-SLOW-NEXT:    vporq %xmm5, %xmm4, %xmm21
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm19, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm2, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm10
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm5
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm10, %xmm7
-; AVX512F-SLOW-NEXT:    vporq %xmm4, %xmm7, %xmm19
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm9
+; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm9, %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm10
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm10, %xmm4
+; AVX512F-SLOW-NEXT:    vporq %xmm8, %xmm4, %xmm21
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm5, %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm5, %xmm11
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm6
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm23, %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm6, %xmm8
+; AVX512F-SLOW-NEXT:    vporq %xmm4, %xmm8, %xmm19
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %xmm5
 ; AVX512F-SLOW-NEXT:    vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm5, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm7
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm7, %xmm6
-; AVX512F-SLOW-NEXT:    vpor %xmm4, %xmm6, %xmm4
+; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm5, %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufb %xmm12, %xmm8, %xmm7
+; AVX512F-SLOW-NEXT:    vpor %xmm4, %xmm7, %xmm4
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm23, %zmm0, %zmm4
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm7[8],xmm5[8],xmm7[9],xmm5[9],xmm7[10],xmm5[10],xmm7[11],xmm5[11],xmm7[12],xmm5[12],xmm7[13],xmm5[13],xmm7[14],xmm5[14],xmm7[15],xmm5[15]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm4 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm8[8],xmm5[8],xmm8[9],xmm5[9],xmm8[10],xmm5[10],xmm8[11],xmm5[11],xmm8[12],xmm5[12],xmm8[13],xmm5[13],xmm8[14],xmm5[14],xmm8[15],xmm5[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm6, %xmm6
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm27
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm6[0,1,0,1],zmm4[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm7, %xmm7
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm29
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm7[0,1,0,1],zmm4[4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %xmm12
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm12[0,1,2,3,4,5,5,6]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rax), %xmm13
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm13[0,1,2,3,4,5,5,6]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; AVX512F-SLOW-NEXT:    vpandn %ymm4, %ymm11, %ymm4
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = zero,ymm1[13],zero,zero,zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm11, %zmm4, %zmm23
+; AVX512F-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = zero,ymm3[13],zero,zero,zero,zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm12, %zmm4, %zmm23
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm18, %ymm4
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[30],zero,ymm4[28],zero,zero,zero,zero,ymm4[31],zero,ymm4[29],zero,zero
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm18
 ; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm5 = [13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm16, %ymm14
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm5, %ymm14, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm29
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm28
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm8[27],zero,zero,zero,zero,ymm8[30],zero,ymm8[28],zero,zero,zero,zero,ymm8[31],zero,ymm8[29]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm11
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm7
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm13[0],xmm7[0],xmm13[1],xmm7[1],xmm13[2],xmm7[2],xmm13[3],xmm7[3],xmm13[4],xmm7[4],xmm13[5],xmm7[5],xmm13[6],xmm7[6],xmm13[7],xmm7[7]
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm12 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm5[27],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm12
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm7
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3],xmm2[4],xmm7[4],xmm2[5],xmm7[5],xmm2[6],xmm7[6],xmm2[7],xmm7[7]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm11[0,1,2,3],zmm4[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm4 = zmm12[0,1,2,3],zmm4[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm1 = ymm0[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm0[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm17
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,2,3,3,6,6,7,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,2,3,3,6,6,7,7]
 ; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10]
-; AVX512F-SLOW-NEXT:    vmovdqa %ymm3, %ymm6
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm3, %ymm11
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm11, %zmm26
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm27, %ymm8
+; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm8, %ymm12
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm12, %zmm20
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero,ymm3[29],zero,zero,zero
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
+; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
 ; AVX512F-SLOW-NEXT:    vpshuflw $233, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm4 = mem[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[0,0,1,1,4,4,5,5]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm4, %zmm0
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm0
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3],xmm0[4],xmm15[4],xmm0[5],xmm15[5],xmm0[6],xmm15[6],xmm0[7],xmm15[7]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm16
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm15[8],xmm0[8],xmm15[9],xmm0[9],xmm15[10],xmm0[10],xmm15[11],xmm0[11],xmm15[12],xmm0[12],xmm15[13],xmm0[13],xmm15[14],xmm0[14],xmm15[15],xmm0[15]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm10[8],xmm2[9],xmm10[9],xmm2[10],xmm10[10],xmm2[11],xmm10[11],xmm2[12],xmm10[12],xmm2[13],xmm10[13],xmm2[14],xmm10[14],xmm2[15],xmm10[15]
-; AVX512F-SLOW-NEXT:    vmovdqa %xmm2, %xmm11
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3],xmm0[4],xmm15[4],xmm0[5],xmm15[5],xmm0[6],xmm15[6],xmm0[7],xmm15[7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm3, %xmm16
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm15[8],xmm0[8],xmm15[9],xmm0[9],xmm15[10],xmm0[10],xmm15[11],xmm0[11],xmm15[12],xmm0[12],xmm15[13],xmm0[13],xmm15[14],xmm0[14],xmm15[15],xmm0[15]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm11[8],xmm6[8],xmm11[9],xmm6[9],xmm11[10],xmm6[10],xmm11[11],xmm6[11],xmm11[12],xmm6[12],xmm11[13],xmm6[13],xmm11[14],xmm6[14],xmm11[15],xmm6[15]
+; AVX512F-SLOW-NEXT:    vmovdqa %xmm11, %xmm12
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm4, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm1, %xmm1
+; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm3, %xmm3
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm30
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm3, %zmm0, %zmm30
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm22, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm20, %xmm5
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm5[8],xmm9[8],xmm5[9],xmm9[9],xmm5[10],xmm9[10],xmm5[11],xmm9[11],xmm5[12],xmm9[12],xmm5[13],xmm9[13],xmm5[14],xmm9[14],xmm5[15],xmm9[15]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm4 = <2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm3, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
 ; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm22
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm13[8],xmm7[8],xmm13[9],xmm7[9],xmm13[10],xmm7[10],xmm13[11],xmm7[11],xmm13[12],xmm7[12],xmm13[13],xmm7[13],xmm13[14],xmm7[14],xmm13[15],xmm7[15]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm27, %xmm1
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm29, %xmm1
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
 ; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm24 = zmm24[0,1,0,1],zmm0[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm13
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm26, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm7
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm31
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm29
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm0[23],zero,ymm0[21,22,23,26],zero,ymm0[24],zero,ymm0[28,29,26,27]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm11 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,ymm0[23],zero,ymm0[21,22,23,26],zero,ymm0[24],zero,ymm0[28,29,26,27]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,ymm0[18,19,20,21],zero,ymm0[19],zero,ymm0[25,26,27,22],zero,ymm0[20],zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm20
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20],zero,zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm29, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm26
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm8[21],zero,ymm8[19],zero,zero,zero,zero,ymm8[22],zero,ymm8[20],zero,zero
+; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm28, %ymm0
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm1, %ymm3
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm14[25],zero,ymm14[23],zero,zero,zero,zero,ymm14[26],zero,ymm14[24],zero,zero
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -7648,40 +7647,40 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm1
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm8, %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm29
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm5, %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm2, %ymm27
 ; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
 ; AVX512F-SLOW-NEXT:    # ymm2 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm8, %ymm8
-; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm8, %ymm27
+; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm5, %ymm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm5, %ymm28
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
-; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm0, %ymm31
 ; AVX512F-SLOW-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = zero,ymm0[13],zero,zero,zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,zero,zero,ymm0[17],zero,zero
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm14 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm14 = ymm14[0,1,1,3,4,5,5,7]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,3,2]
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; AVX512F-SLOW-NEXT:    vpandnq %ymm14, %ymm28, %ymm14
+; AVX512F-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm14, %ymm14
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm14, %zmm2, %zmm2
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3],xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm15, %xmm15
-; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm21, %zmm9, %zmm14
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3],xmm10[4],xmm11[4],xmm10[5],xmm11[5],xmm10[6],xmm11[6],xmm10[7],xmm11[7]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm10 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm10, %xmm10
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm21, %zmm10, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm0, %xmm8
-; AVX512F-SLOW-NEXT:    vpshufb %xmm10, %xmm9, %xmm9
-; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm19, %zmm9, %zmm9
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm4[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm18[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm0, %xmm10
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm6, %xmm6
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm19, %zmm6, %zmm6
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm4[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm18[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm19 = ymm3[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm1[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm7[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm6[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm11[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm8[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm17, %ymm1
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm1 = ymm1[1,2,2,3,4,5,6,7,9,10,10,11,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm21 = ymm1[0,0,1,1,4,4,5,5]
@@ -7691,29 +7690,28 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
 ; AVX512F-SLOW-NEXT:    vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # zmm1 = zmm1[0,1,0,1],mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm12[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm12[1,1,0,0,4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[0,1,2,0]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm6, %zmm3
-; AVX512F-SLOW-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm19, %ymm6
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # zmm6 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vporq %zmm6, %zmm0, %zmm0
-; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm6 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
-; AVX512F-SLOW-NEXT:    # ymm6 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpand %ymm6, %ymm13, %ymm7
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm3 = xmm13[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm13[1,1,0,0,4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[0,1,2,0]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm8, %zmm3
+; AVX512F-SLOW-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm19, %ymm8
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm8 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vporq %zmm8, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm8 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
+; AVX512F-SLOW-NEXT:    vpand %ymm7, %ymm8, %ymm7
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
 ; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # zmm7 = mem[2,3,2,3,6,7,6,7]
 ; AVX512F-SLOW-NEXT:    vporq %zmm7, %zmm5, %zmm5
 ; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # zmm7 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm12 = zmm26[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm12
-; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm12
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm9 = zmm20[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm9
+; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm9
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm5, %zmm12
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm5, %zmm9
 ; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # zmm0 = mem[2,3,2,3,6,7,6,7]
 ; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Folded Reload
@@ -7728,80 +7726,80 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm30[0,1,0,1,4,5,4,5]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm7 = zmm22[0,1,0,1,4,5,4,5]
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm5, %zmm7
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm6, %ymm10, %ymm11
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm8, %ymm12, %ymm14
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm21[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $236, %ymm6, %ymm4, %ymm0
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm8[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm11, %zmm4
+; AVX512F-SLOW-NEXT:    vpternlogq $236, %ymm8, %ymm4, %ymm0
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm10[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm14, %zmm4
 ; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm5 = mem[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshufhw $190, {{[-0-9]+}}(%r{{[sb]}}p), %ymm6 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm6 = mem[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[2,2,3,3,6,6,7,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm6
+; AVX512F-SLOW-NEXT:    vpshufhw $190, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm8 = mem[0,1,2,3,6,7,7,6,8,9,10,11,14,15,15,14]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[2,2,3,3,6,6,7,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpternlogq $236, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm8
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm15[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm5
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm6, %zmm5
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm31[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm20[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpor %ymm4, %ymm8, %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm8, %zmm4
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
-; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm8 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm10 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm10 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm11 = mem[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshuflw $5, {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm8, %zmm5
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
+; AVX512F-SLOW-NEXT:    vpternlogq $184, %zmm4, %zmm8, %zmm5
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm29[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm26[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpor %ymm4, %ymm11, %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm10, %zmm4
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm10 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm10, %zmm0
+; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm12 = mem[0,1,0,1]
+; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # ymm14 = mem[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshuflw $5, (%rsp), %xmm15 # 16-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # xmm15 = mem[1,1,0,0,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm15 = xmm15[0,1,2,0]
 ; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm17 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm17 = mem[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm29[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm18 = ymm27[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm19 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm19 = mem[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm27[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm20 = ymm28[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm21 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm21 = mem[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %ymm22 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    # ymm22 = mem[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm4, %zmm6, %zmm0
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm4 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm6 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm6
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm22 = ymm31[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm4, %zmm8, %zmm0
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm11, %zmm4 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm12, %zmm8 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm8
 ; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
 ; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm23
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm23
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm23
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm15[0,0,1,0]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm11, %zmm4
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm4
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm14, %zmm4
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm4
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm4
 ; AVX512F-SLOW-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # zmm5 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm5
-; AVX512F-SLOW-NEXT:    vporq %ymm17, %ymm18, %ymm6
-; AVX512F-SLOW-NEXT:    vporq %ymm19, %ymm20, %ymm8
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm6
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm8[0,1,2,3],zmm6[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm25
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm5
+; AVX512F-SLOW-NEXT:    vporq %ymm17, %ymm18, %ymm8
+; AVX512F-SLOW-NEXT:    vporq %ymm19, %ymm20, %ymm9
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm8
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm8 = zmm9[0,1,2,3],zmm8[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm25
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm25
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm24
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm24
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm24
-; AVX512F-SLOW-NEXT:    vporq %ymm21, %ymm22, %ymm6
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm6
-; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
-; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm6 = zmm7[0,1,2,3],zmm6[4,5,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm2
+; AVX512F-SLOW-NEXT:    vporq %ymm21, %ymm22, %ymm7
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
+; AVX512F-SLOW-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512F-SLOW-NEXT:    vshufi64x2 {{.*#+}} zmm7 = zmm8[0,1,2,3],zmm7[4,5,6,7]
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm2
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm14[0,1,0,1,4,5,4,5]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm6 = zmm9[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Folded Reload
+; AVX512F-SLOW-NEXT:    # zmm0 = mem[0,1,0,1,4,5,4,5]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[0,1,0,1,4,5,4,5]
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm6
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm3[0,0,1,0,4,4,5,4]
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
@@ -7814,33 +7812,35 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm5, 384(%rax)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, 192(%rax)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm23, 64(%rax)
-; AVX512F-SLOW-NEXT:    addq $1416, %rsp # imm = 0x588
+; AVX512F-SLOW-NEXT:    addq $1464, %rsp # imm = 0x5B8
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
 ; AVX512F-ONLY-FAST-LABEL: store_i8_stride7_vf64:
 ; AVX512F-ONLY-FAST:       # %bb.0:
-; AVX512F-ONLY-FAST-NEXT:    subq $1496, %rsp # imm = 0x5D8
+; AVX512F-ONLY-FAST-NEXT:    subq $1256, %rsp # imm = 0x4E8
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm15[30],zero,ymm15[28],zero,zero,zero,zero,ymm15[31],zero,ymm15[29],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm15, %ymm17
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm7[25],zero,ymm7[23],zero,zero,zero,zero,ymm7[26],zero,ymm7[24],zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm1, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm2, %ymm13
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm15[30],zero,ymm15[28],zero,zero,zero,zero,ymm15[31],zero,ymm15[29],zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27],zero,ymm3[25]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm16
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm17
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %ymm4
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm18
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[25],zero,ymm4[23],zero,zero,zero,zero,ymm4[26],zero,ymm4[24],zero,zero
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
@@ -7853,446 +7853,431 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,zero,zero,ymm1[18]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm23
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,14],zero,ymm1[12,13,0,1,14,15],zero,ymm1[3,12,13,2,3,16],zero,ymm1[30,31,28,29,16,17],zero,ymm1[31,18,19,28,29,18],zero
 ; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm30
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %ymm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm26
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm10, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm3, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0,13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0]
-; AVX512F-ONLY-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm29
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm31
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm23
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm18
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm1, %xmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm5, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm21
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm11
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm1, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm1, %xmm28
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm11, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm0, %xmm5, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %xmm9
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm0, %xmm16
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm25
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm5, (%rsp) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0,13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0]
+; AVX512F-ONLY-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm30
+; AVX512F-ONLY-FAST-NEXT:    vporq %ymm3, %ymm5, %ymm24
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdx), %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rcx), %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm6, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm6, %xmm28
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm9, %xmm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm3, %xmm29
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm5, %xmm6, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rdi), %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rsi), %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm6, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm10, %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm10, %xmm27
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm5, %xmm9, %xmm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r9), %xmm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%r8), %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm15, %xmm9
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm9, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm12, %xmm13, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm7, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm7, %ymm24
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm10, %xmm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm0, %xmm21
+; AVX512F-ONLY-FAST-NEXT:    vporq %xmm9, %xmm12, %xmm22
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm13, %ymm7
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm6, %ymm7, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,ymm15[14],zero,zero,zero,zero,zero,zero,ymm15[15],zero,zero,zero,zero,zero,zero,ymm15[16],zero,zero,zero,zero,zero,zero,ymm15[17],zero,zero,zero,zero,zero,zero,ymm15[18]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm3[0,1,14],zero,ymm3[12,13,0,1,14,15],zero,ymm3[3,12,13,2,3,16],zero,ymm3[30,31,28,29,16,17],zero,ymm3[31,18,19,28,29,18],zero
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm6, %ymm7, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm19, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm29, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm6, %ymm4, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm3, %ymm6, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm4, %xmm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm7, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm18, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm3, %xmm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm1, %xmm2, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm13, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm14, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm7, %ymm2, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,ymm7[14],zero,zero,zero,zero,zero,zero,ymm7[15],zero,zero,zero,zero,zero,zero,ymm7[16],zero,zero,zero,zero,zero,zero,ymm7[17],zero,zero,zero,zero,zero,zero,ymm7[18]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[0,1,14],zero,ymm7[12,13,0,1,14,15],zero,ymm7[3,12,13,2,3,16],zero,ymm7[30,31,28,29,16,17],zero,ymm7[31,18,19,28,29,18],zero
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm2, %ymm7, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %ymm18, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm30, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rsi), %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm11, %xmm13, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdi), %xmm9
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm9, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vporq %xmm0, %xmm2, %xmm31
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rcx), %xmm14
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm14, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rdx), %xmm8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm19, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm8, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r9), %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm2, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm2, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, %xmm3
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%r8), %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpor %xmm1, %xmm2, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero,ymm0[27],zero,ymm0[25]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm6
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero,ymm0[27],zero,ymm0[25]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm23, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm12[21],zero,ymm12[19],zero,zero,zero,zero,ymm12[22],zero,ymm12[20],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm30, %ymm15
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm15[25],zero,ymm15[23],zero,zero,zero,zero,ymm15[26],zero,ymm15[24],zero,zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
-; AVX512F-ONLY-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm15, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm30, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm26, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm11[25],zero,ymm11[23],zero,zero,zero,zero,ymm11[26],zero,ymm11[24],zero,zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[18],zero,zero,zero,zero,ymm11[21],zero,ymm11[19],zero,zero,zero,zero,ymm11[22],zero,ymm11[20]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512F-ONLY-FAST-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
-; AVX512F-ONLY-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm10, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm10, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm29
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512F-ONLY-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
+; AVX512F-ONLY-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm30
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm23, %zmm0, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,0,1],zmm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,0,1],zmm0[4,5,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm23 = [2,2,3,3,2,2,3,3]
-; AVX512F-ONLY-FAST-NEXT:    # ymm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [2,2,3,3,2,2,3,3]
+; AVX512F-ONLY-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rax), %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm2, %ymm23, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; AVX512F-ONLY-FAST-NEXT:    vpandn %ymm2, %ymm3, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rax), %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm10[30],zero,ymm10[28],zero,zero,zero,zero,ymm10[31],zero,ymm10[29],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm26
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,6]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm0, %ymm2, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa (%rax), %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm5, %ymm18
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
 ; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm31, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm30
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm22, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero,ymm0[29]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm31 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512F-ONLY-FAST-NEXT:    # ymm31 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm31, %ymm2, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm16, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3],xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm3[0,1,2,3],zmm2[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm2, %xmm22
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm12[8],xmm5[8],xmm12[9],xmm5[9],xmm12[10],xmm5[10],xmm12[11],xmm5[11],xmm12[12],xmm5[12],xmm12[13],xmm5[13],xmm12[14],xmm5[14],xmm12[15],xmm5[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm5, %xmm20
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm12, %xmm21
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm25
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm26 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
+; AVX512F-ONLY-FAST-NEXT:    # ymm26 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm26, %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3],xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm1[0,1,2,3],zmm0[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm29, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm28, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm14[8],xmm8[8],xmm14[9],xmm8[9],xmm14[10],xmm8[10],xmm14[11],xmm8[11],xmm14[12],xmm8[12],xmm14[13],xmm8[13],xmm14[14],xmm8[14],xmm14[15],xmm8[15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm28, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3],xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm11[8],xmm0[9],xmm11[9],xmm0[10],xmm11[10],xmm0[11],xmm11[11],xmm0[12],xmm11[12],xmm0[13],xmm11[13],xmm0[14],xmm11[14],xmm0[15],xmm11[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm17, %xmm3
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm3[8],xmm7[8],xmm3[9],xmm7[9],xmm3[10],xmm7[10],xmm3[11],xmm7[11],xmm3[12],xmm7[12],xmm3[13],xmm7[13],xmm3[14],xmm7[14],xmm3[15],xmm7[15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15]
-; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm8, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm24, %ymm11
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm27 = ymm1[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm13, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm28 = ymm1[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm28
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm27, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm6, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm13[8],xmm9[8],xmm13[9],xmm9[9],xmm13[10],xmm9[10],xmm13[11],xmm9[11],xmm13[12],xmm9[12],xmm13[13],xmm9[13],xmm13[14],xmm9[14],xmm13[15],xmm9[15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm27
+; AVX512F-ONLY-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm10[8],xmm15[8],xmm10[9],xmm15[9],xmm10[10],xmm15[10],xmm10[11],xmm15[11],xmm10[12],xmm15[12],xmm10[13],xmm15[13],xmm10[14],xmm15[14],xmm10[15],xmm15[15]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm22[0,1,0,1],zmm1[0,1,0,1]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa 32(%rax), %xmm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm0, %ymm23, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,5,6]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm0, %xmm29
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm2, %ymm0
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm11, %ymm10
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm15[9,u,7,u,u,u,u,10,u,8,u,u,u,u,11,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm24 = ymm0[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm29, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm13, %ymm8
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm15[21],zero,ymm15[19],zero,zero,zero,zero,ymm15[22],zero,ymm15[20],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm5
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm20, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm1[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm19, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm1[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm30, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm2, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm2, %ymm30
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm1[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm18, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm3
 ; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,5,4,5,5,7,4,5]
 ; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm0, %ymm1, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; AVX512F-ONLY-FAST-NEXT:    vpandnq %ymm0, %ymm25, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
-; AVX512F-ONLY-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm15, %ymm9
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm9[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3],xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
+; AVX512F-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm17, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm3, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm15[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm29
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm13[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3],xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5>
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm5, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm13, %xmm7, %xmm7
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm1 # 16-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm1[23],zero,ymm1[23,24,25,26],zero,ymm1[24],zero,ymm1[30,31]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm1, %ymm30
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm7[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm6, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29],zero,zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm7[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm0[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm12[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm26[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm20, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm21, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm22, %xmm4
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm13
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm11[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm1, %xmm12, %xmm1
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
-; AVX512F-ONLY-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm15, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm18 = ymm13[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, %xmm31, %zmm0, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3],xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm7
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm13
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm0[23],zero,ymm0[23,24,25,26],zero,ymm0[24],zero,ymm0[30,31]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm0, %ymm25
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm8[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa %ymm12, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm12[30],zero,ymm12[28],zero,zero,zero,zero,ymm12[31],zero,ymm12[29],zero,zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm9, %ymm2, %ymm9
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm11 # 16-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[9,u,7,u,u,u,u,10,u,8,u,u,u,u,11,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm23[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm11[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm31 # 16-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
+; AVX512F-ONLY-FAST-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm3, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm30, %ymm3
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm13
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm2, %ymm1
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm3, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm6, %ymm14, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
-; AVX512F-ONLY-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm5, %ymm7, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm12, %ymm9, %ymm9
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm9, %zmm6
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
+; AVX512F-ONLY-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, %ymm9, %ymm5, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm0, %zmm7
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm10, %ymm8, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm0, %zmm7
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm4, %ymm10, %ymm4
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm8, %ymm14, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm4, %ymm9, %ymm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm0, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vpandq %ymm5, %ymm27, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm28, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm0, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vpandq %ymm9, %ymm22, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm20, %zmm0
 ; AVX512F-ONLY-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # zmm2 = mem[2,3,2,3,6,7,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vporq %zmm2, %zmm0, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpandq %ymm31, %ymm24, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpandq %ymm26, %ymm19, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm2, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm3 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vporq %zmm3, %zmm2, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vporq %zmm4, %zmm2, %zmm2
 ; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpandq %ymm31, %ymm25, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm19, %zmm0
-; AVX512F-ONLY-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm3 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vporq %zmm3, %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm0, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,0,1,4,5,4,5]
-; AVX512F-ONLY-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm8 = mem[0,1,0,1,4,5,4,5]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm0, %zmm8
-; AVX512F-ONLY-FAST-NEXT:    vpandq %ymm31, %ymm1, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm12, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vpandq %ymm26, %ymm18, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm15, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm4 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vporq %zmm4, %zmm0, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm4, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} zmm2 = zmm28[0,1,0,1,4,5,4,5]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} zmm8 = zmm27[0,1,0,1,4,5,4,5]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm4, %zmm8
+; AVX512F-ONLY-FAST-NEXT:    vpandq %ymm26, %ymm13, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # zmm2 = mem[2,3,2,3,6,7,6,7]
 ; AVX512F-ONLY-FAST-NEXT:    vporq %zmm2, %zmm1, %zmm1
 ; AVX512F-ONLY-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # zmm2 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm5 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vporq %zmm2, %zmm5, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm0, %zmm22
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm26 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm26 = zmm0[0,1,0,1],mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[1,1,0,0,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,0,1,2,0,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm1, %ymm2, %ymm19
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm0[1,1,0,0,4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm5, %ymm2, %ymm17
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm10
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm5
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
-; AVX512F-ONLY-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm29, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm0, %ymm13
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[25],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm12, %ymm0, %ymm12
-; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
+; AVX512F-ONLY-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm6 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vporq %zmm2, %zmm6, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm4, %zmm9
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm18 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm18 = zmm1[0,1,0,1],mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %xmm29, %xmm3
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[1,1,0,0,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,0,1,2,0,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm2, %ymm4, %ymm19
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm1[1,1,0,0,4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm6, %ymm4, %ymm17
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm10
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %xmm6, %xmm1, %xmm6
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
+; AVX512F-ONLY-FAST-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm12
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu (%rsp), %ymm1 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm11
+; AVX512F-ONLY-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
+; AVX512F-ONLY-FAST-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm25, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm15
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm2[23],zero,ymm2[23,24,25,26],zero,ymm2[24],zero,ymm2[30,31]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm14, %ymm2, %ymm14
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm3[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [4,5,4,5,5,7,4,5]
+; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm4, %ymm2, %ymm20
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512F-ONLY-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # zmm22 = mem[2,3,2,3,6,7,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm22
+; AVX512F-ONLY-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %ymm30, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm1, %ymm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm9[23],zero,ymm9[23,24,25,26],zero,ymm9[24],zero,ymm9[30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm0
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm4[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [4,5,4,5,5,7,4,5]
-; AVX512F-ONLY-FAST-NEXT:    vpermd %ymm15, %ymm9, %ymm20
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512F-ONLY-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512F-ONLY-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # zmm24 = mem[2,3,2,3,6,7,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm24
-; AVX512F-ONLY-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    # ymm25 = mem[0,1,0,1]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    # ymm23 = mem[0,1,0,1]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,1,0]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,1,0]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,1,0]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
 ; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm25 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm25
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm25, %zmm18
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm15, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm16
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm3 # 32-byte Folded Reload
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm3
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm2, %ymm13, %ymm2
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm23 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm23, %zmm24
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm2, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm21
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm0 # 32-byte Folded Reload
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm0
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm12, %ymm15, %ymm2
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm4[0,1,2,3],zmm2[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm23
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm23
+; AVX512F-ONLY-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm7[0,1,2,3],zmm2[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm16
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm16
 ; AVX512F-ONLY-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
 ; AVX512F-ONLY-FAST-NEXT:    # zmm2 = mem[0,1,0,1,4,5,4,5]
-; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} zmm6 = zmm11[0,1,0,1,4,5,4,5]
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm6
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm17, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm26, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm2
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm1, %ymm14, %ymm1
-; AVX512F-ONLY-FAST-NEXT:    vpor %ymm0, %ymm12, %ymm0
+; AVX512F-ONLY-FAST-NEXT:    vpermq {{.*#+}} zmm5 = zmm31[0,1,0,1,4,5,4,5]
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm5
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm17, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm2
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm1, %ymm13, %ymm1
+; AVX512F-ONLY-FAST-NEXT:    vpor %ymm11, %ymm14, %ymm5
 ; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[4,5,6,7]
-; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm20, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm22, %zmm1
+; AVX512F-ONLY-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm5[0,1,2,3],zmm1[4,5,6,7]
+; AVX512F-ONLY-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm20, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm4
+; AVX512F-ONLY-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm4
 ; AVX512F-ONLY-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm1, 128(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm4, 128(%rax)
 ; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm23, 320(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm3, 256(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 192(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm18, 64(%rax)
-; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, 384(%rax)
-; AVX512F-ONLY-FAST-NEXT:    addq $1496, %rsp # imm = 0x5D8
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm16, 320(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm0, 256(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm21, 192(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm24, 64(%rax)
+; AVX512F-ONLY-FAST-NEXT:    vmovdqa64 %zmm22, 384(%rax)
+; AVX512F-ONLY-FAST-NEXT:    addq $1256, %rsp # imm = 0x4E8
 ; AVX512F-ONLY-FAST-NEXT:    vzeroupper
 ; AVX512F-ONLY-FAST-NEXT:    retq
 ;
 ; AVX512DQ-FAST-LABEL: store_i8_stride7_vf64:
 ; AVX512DQ-FAST:       # %bb.0:
-; AVX512DQ-FAST-NEXT:    subq $1496, %rsp # imm = 0x5D8
+; AVX512DQ-FAST-NEXT:    subq $1256, %rsp # imm = 0x4E8
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %ymm7
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %ymm15
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm15[30],zero,ymm15[28],zero,zero,zero,zero,ymm15[31],zero,ymm15[29],zero,zero
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm15, %ymm17
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm7[25],zero,ymm7[23],zero,zero,zero,zero,ymm7[26],zero,ymm7[24],zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm1, %ymm14
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[25],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm2, %ymm13
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %ymm15
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm3
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm15[30],zero,ymm15[28],zero,zero,zero,zero,ymm15[31],zero,ymm15[29],zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[23],zero,zero,zero,zero,ymm3[26],zero,ymm3[24],zero,zero,zero,zero,ymm3[27],zero,ymm3[25]
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %ymm2
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm16
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[23],zero,zero,zero,zero,ymm2[26],zero,ymm2[24],zero,zero,zero,zero,ymm2[27],zero,ymm2[25]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm17
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %ymm4
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %ymm1
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm18
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm4[25],zero,ymm4[23],zero,zero,zero,zero,ymm4[26],zero,ymm4[24],zero,zero
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
@@ -8305,420 +8290,403 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %ymm1
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,zero,zero,ymm1[18]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm26
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm23
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[0,1,14],zero,ymm1[12,13,0,1,14,15],zero,ymm1[3,12,13,2,3,16],zero,ymm1[30,31,28,29,16,17],zero,ymm1[31,18,19,28,29,18],zero
 ; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm1, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm30
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %ymm10
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm26
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %ymm1
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm10, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm25
-; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm3
+; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm3, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm22
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm2
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0,13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0]
-; AVX512DQ-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm2, %ymm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm29
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm31
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm23
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %xmm5
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm2, %xmm18
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm1, %xmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm5, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm21
-; AVX512DQ-FAST-NEXT:    vpor %xmm0, %xmm1, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm11
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm1, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm1, %xmm28
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm11, %xmm5
-; AVX512DQ-FAST-NEXT:    vpor %xmm0, %xmm5, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %xmm9
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm0, %xmm16
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm0, %ymm3
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm25
+; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm5, (%rsp) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0,13,0,0,0,128,16,128,14,0,0,0,128,17,128,15,0]
+; AVX512DQ-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm30
+; AVX512DQ-FAST-NEXT:    vporq %ymm3, %ymm5, %ymm24
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdx), %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rcx), %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm6, %xmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm6, %xmm28
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm9, %xmm19
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm3, %xmm29
+; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm6, %xmm3
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rdi), %xmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rsi), %xmm6
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm11 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm6, %xmm5
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm10, %xmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm10, %xmm27
+; AVX512DQ-FAST-NEXT:    vpor %xmm5, %xmm9, %xmm5
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r9), %xmm15
+; AVX512DQ-FAST-NEXT:    vmovdqa 32(%r8), %xmm10
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm15, %xmm9
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm9, %xmm13
-; AVX512DQ-FAST-NEXT:    vpor %xmm12, %xmm13, %xmm12
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm7, %ymm6
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm7, %ymm24
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm7
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm10, %xmm12
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm0, %xmm21
+; AVX512DQ-FAST-NEXT:    vporq %xmm9, %xmm12, %xmm22
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm13, %ymm7
-; AVX512DQ-FAST-NEXT:    vpor %ymm6, %ymm7, %ymm6
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,ymm15[14],zero,zero,zero,zero,zero,zero,ymm15[15],zero,zero,zero,zero,zero,zero,ymm15[16],zero,zero,zero,zero,zero,zero,ymm15[17],zero,zero,zero,zero,zero,zero,ymm15[18]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm3[0,1,14],zero,ymm3[12,13,0,1,14,15],zero,ymm3[3,12,13,2,3,16],zero,ymm3[30,31,28,29,16,17],zero,ymm3[31,18,19,28,29,18],zero
-; AVX512DQ-FAST-NEXT:    vpor %ymm6, %ymm7, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm19, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,ymm3[14],zero,zero,zero,zero,zero,zero,ymm3[15],zero,zero,zero,zero,zero,zero,ymm3[16],zero,zero,zero,zero,zero,zero,ymm3[17],zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm6
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm6, %ymm4, %ymm6
-; AVX512DQ-FAST-NEXT:    vpor %ymm3, %ymm6, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm14, %xmm4, %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm4, %xmm17
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm7
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm7, %xmm1
-; AVX512DQ-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %xmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm18, %xmm1
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm3, %xmm12
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm5
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm5, %xmm2
-; AVX512DQ-FAST-NEXT:    vpor %xmm1, %xmm2, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm13, %ymm20
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm14, %ymm2
+; AVX512DQ-FAST-NEXT:    vpor %ymm7, %ymm2, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm7
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,ymm7[14],zero,zero,zero,zero,zero,zero,ymm7[15],zero,zero,zero,zero,zero,zero,ymm7[16],zero,zero,zero,zero,zero,zero,ymm7[17],zero,zero,zero,zero,zero,zero,ymm7[18]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm7
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[0,1,14],zero,ymm7[12,13,0,1,14,15],zero,ymm7[3,12,13,2,3,16],zero,ymm7[30,31,28,29,16,17],zero,ymm7[31,18,19,28,29,18],zero
+; AVX512DQ-FAST-NEXT:    vpor %ymm2, %ymm7, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %ymm18, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm7
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm7, %ymm2, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
+; AVX512DQ-FAST-NEXT:    vpor %ymm2, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rsi), %xmm13
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm11, %xmm13, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdi), %xmm9
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm9, %xmm2
+; AVX512DQ-FAST-NEXT:    vporq %xmm0, %xmm2, %xmm31
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rcx), %xmm14
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm14, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rdx), %xmm8
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm19, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm8, %xmm2
+; AVX512DQ-FAST-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%r9), %xmm2
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm2, %xmm1
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm2, %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm2, %xmm3
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%r8), %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm4, %xmm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm21, %xmm2
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm2
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpor %xmm1, %xmm2, %xmm0
+; AVX512DQ-FAST-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero,ymm0[27],zero,ymm0[25]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm6
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm6[21],zero,ymm6[19],zero,zero,zero,zero,ymm6[22],zero,ymm6[20],zero,zero
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm0
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero,zero,zero,ymm0[27],zero,ymm0[25]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm23, %ymm12
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm12[21],zero,ymm12[19],zero,zero,zero,zero,ymm12[22],zero,ymm12[20],zero,zero
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm15
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm15[25],zero,ymm15[23],zero,zero,zero,zero,ymm15[26],zero,ymm15[24],zero,zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20]
-; AVX512DQ-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm15, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm25
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm8
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm26, %ymm11
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm11[25],zero,ymm11[23],zero,zero,zero,zero,ymm11[26],zero,ymm11[24],zero,zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[18],zero,zero,zero,zero,ymm11[21],zero,ymm11[19],zero,zero,zero,zero,ymm11[22],zero,ymm11[20]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
-; AVX512DQ-FAST-NEXT:    # ymm14 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
-; AVX512DQ-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm10, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm10, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm29
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm0
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27,24,25,128,23,128,21,22,23,26,128,24,128,28,29,26,27]
+; AVX512DQ-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128,18,128,18,19,20,21,128,19,128,25,26,27,22,128,20,128]
+; AVX512DQ-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm19
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm1, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm30
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm23, %zmm0, %zmm1
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm24, %zmm0, %zmm0
 ; AVX512DQ-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,0,1],zmm1[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm2[0,1,0,1],zmm0[4,5,6,7]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vbroadcasti32x4 {{.*#+}} ymm23 = [2,2,3,3,2,2,3,3]
-; AVX512DQ-FAST-NEXT:    # ymm23 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [2,2,3,3,2,2,3,3]
+; AVX512DQ-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
 ; AVX512DQ-FAST-NEXT:    vmovdqa (%rax), %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm2, %ymm23, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; AVX512DQ-FAST-NEXT:    vpandn %ymm2, %ymm3, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa (%rax), %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm19
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm18
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm10[30],zero,ymm10[28],zero,zero,zero,zero,ymm10[31],zero,ymm10[29],zero,zero
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm26
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,6]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm0, %ymm2, %ymm0
+; AVX512DQ-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa (%rax), %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm5, %ymm4, %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm5, %ymm18
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm24
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29],zero,zero
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm23
 ; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm31, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm30
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm22, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,zero,zero,zero,ymm0[30],zero,ymm0[28],zero,zero,zero,zero,ymm0[31],zero,ymm0[29]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm31 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
-; AVX512DQ-FAST-NEXT:    # ymm31 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm31, %ymm2, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm16, %xmm10
-; AVX512DQ-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3],xmm9[4],xmm10[4],xmm9[5],xmm10[5],xmm9[6],xmm10[6],xmm9[7],xmm10[7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm16 = zmm3[0,1,2,3],zmm2[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm21, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, %xmm1
-; AVX512DQ-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm2, %xmm22
-; AVX512DQ-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; AVX512DQ-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm12[8],xmm5[8],xmm12[9],xmm5[9],xmm12[10],xmm5[10],xmm12[11],xmm5[11],xmm12[12],xmm5[12],xmm12[13],xmm5[13],xmm12[14],xmm5[14],xmm12[15],xmm5[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm5, %xmm20
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm12, %xmm21
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm3, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm25
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,zero,zero,zero,ymm1[30],zero,ymm1[28],zero,zero,zero,zero,ymm1[31],zero,ymm1[29]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vbroadcasti64x2 {{.*#+}} ymm26 = [18374967954648269055,71777218572844800,18374967954648269055,71777218572844800]
+; AVX512DQ-FAST-NEXT:    # ymm26 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm26, %ymm0, %ymm1
+; AVX512DQ-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3],xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm1[0,1,2,3],zmm0[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm29, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm28, %xmm1
+; AVX512DQ-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX512DQ-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX512DQ-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm14[8],xmm8[8],xmm14[9],xmm8[9],xmm14[10],xmm8[10],xmm14[11],xmm8[11],xmm14[12],xmm8[12],xmm14[13],xmm8[13],xmm14[14],xmm8[14],xmm14[15],xmm8[15]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm28, %xmm0
-; AVX512DQ-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3],xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
-; AVX512DQ-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm11[8],xmm0[9],xmm11[9],xmm0[10],xmm11[10],xmm0[11],xmm11[11],xmm0[12],xmm11[12],xmm0[13],xmm11[13],xmm0[14],xmm11[14],xmm0[15],xmm11[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm17, %xmm3
-; AVX512DQ-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm3[8],xmm7[8],xmm3[9],xmm7[9],xmm3[10],xmm7[10],xmm3[11],xmm7[11],xmm3[12],xmm7[12],xmm3[13],xmm7[13],xmm3[14],xmm7[14],xmm3[15],xmm7[15]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm0
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm9[8],xmm10[8],xmm9[9],xmm10[9],xmm9[10],xmm10[10],xmm9[11],xmm10[11],xmm9[12],xmm10[12],xmm9[13],xmm10[13],xmm9[14],xmm10[14],xmm9[15],xmm10[15]
-; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm8, %ymm12
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm24, %ymm11
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm1
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm27 = ymm1[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm13, %ymm1
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm28 = ymm1[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,0,1],zmm0[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm1, %zmm28
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm27, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa %xmm6, %xmm1
+; AVX512DQ-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
+; AVX512DQ-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX512DQ-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm13[8],xmm9[8],xmm13[9],xmm9[9],xmm13[10],xmm9[10],xmm13[11],xmm9[11],xmm13[12],xmm9[12],xmm13[13],xmm9[13],xmm13[14],xmm9[14],xmm13[15],xmm9[15]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm0
+; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm27
+; AVX512DQ-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm10[8],xmm15[8],xmm10[9],xmm15[9],xmm10[10],xmm15[10],xmm10[11],xmm15[11],xmm10[12],xmm15[12],xmm10[13],xmm15[13],xmm10[14],xmm15[14],xmm10[15],xmm15[15]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm22[0,1,0,1],zmm1[0,1,0,1]
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FAST-NEXT:    vmovdqa 32(%rax), %xmm0
-; AVX512DQ-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm0, %ymm23, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,5,6]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm0, %xmm29
+; AVX512DQ-FAST-NEXT:    vpermd %ymm1, %ymm2, %ymm0
 ; AVX512DQ-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm11, %ymm10
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm15[9,u,7,u,u,u,u,10,u,8,u,u,u,u,11,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm24 = ymm0[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm13, %ymm8
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm15[21],zero,ymm15[19],zero,zero,zero,zero,ymm15[22],zero,ymm15[20],zero,zero
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12,11,0,0,0,15,14,13,12]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm11, %ymm5
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm20, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm1
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm1[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm19, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm4
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm1[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[18],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm2, %ymm4
+; AVX512DQ-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10,9,8,7,0,0,0,11,10]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm0, %ymm1
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm2, %ymm30
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm1[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm11 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20],zero,zero
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm18, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm1, %ymm0, %ymm3
 ; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm0 = ymm0[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
 ; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [4,5,4,5,5,7,4,5]
 ; AVX512DQ-FAST-NEXT:    vpermd %ymm0, %ymm1, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm25 = [255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255]
-; AVX512DQ-FAST-NEXT:    vpandnq %ymm0, %ymm25, %ymm0
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm23
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
-; AVX512DQ-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm15, %ymm9
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm9[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm16
+; AVX512DQ-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm9[0],xmm13[0],xmm9[1],xmm13[1],xmm9[2],xmm13[2],xmm9[3],xmm13[3],xmm9[4],xmm13[4],xmm9[5],xmm13[5],xmm9[6],xmm13[6],xmm9[7],xmm13[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm6, %xmm6
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29,28,29,30,128,28,128,30,31,30,31,128,29,128,31,28,29]
+; AVX512DQ-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm17, %ymm3
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm3, %ymm15
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm15[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm2
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm2
 ; AVX512DQ-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm13
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm29
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm13[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3],xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5>
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm5, %xmm5
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm13, %xmm7, %xmm7
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm1 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm1[23],zero,ymm1[23,24,25,26],zero,ymm1[24],zero,ymm1[30,31]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm1, %ymm30
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm19 = ymm7[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa %ymm6, %ymm2
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm6[30],zero,ymm6[28],zero,zero,zero,zero,ymm6[31],zero,ymm6[29],zero,zero,zero
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm7[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm0[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm12[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm26[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm20, %xmm1
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm21, %xmm4
-; AVX512DQ-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm22, %xmm4
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm13
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm11[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm1, %xmm12, %xmm1
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
-; AVX512DQ-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm15, %ymm9
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm18 = ymm13[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, %xmm31, %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm14[0],xmm8[1],xmm14[1],xmm8[2],xmm14[2],xmm8[3],xmm14[3],xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm7, %xmm7
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm13
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm0[23],zero,ymm0[23,24,25,26],zero,ymm0[24],zero,ymm0[30,31]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm0, %ymm25
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm8[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa %ymm12, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm12[30],zero,ymm12[28],zero,zero,zero,zero,ymm12[31],zero,ymm12[29],zero,zero,zero
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm9, %ymm2, %ymm9
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm11 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[9,u,7,u,u,u,u,10,u,8,u,u,u,u,11,u,25,u,23,u,u,u,u,26,u,24,u,u,u,u,27,u]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm23[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm11[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm13, %zmm31 # 16-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23,18,19,20,21,128,19,128,21,20,21,22,128,20,128,22,23]
+; AVX512DQ-FAST-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm3, %ymm14
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm3
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm3, %ymm1, %ymm13
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm2, %ymm1
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm3, %ymm12
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vpor %ymm6, %ymm14, %ymm2
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm2, %zmm2
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
-; AVX512DQ-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm5, %ymm7, %ymm0
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm7
+; AVX512DQ-FAST-NEXT:    vpor %ymm12, %ymm9, %ymm9
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm9, %zmm6
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm9 = [18374966859431673855,18446463693966278655,18374966859431673855,18446463693966278655]
+; AVX512DQ-FAST-NEXT:    # ymm9 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, %ymm9, %ymm5, %ymm0
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm0, %zmm7
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm0, %zmm7
-; AVX512DQ-FAST-NEXT:    vpor %ymm10, %ymm8, %ymm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm6, %zmm0, %zmm7
+; AVX512DQ-FAST-NEXT:    vpor %ymm4, %ymm10, %ymm4
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm4
+; AVX512DQ-FAST-NEXT:    vpor %ymm8, %ymm14, %ymm2
 ; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512DQ-FAST-NEXT:    vpor %ymm4, %ymm9, %ymm3
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm6
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm0, %zmm6
-; AVX512DQ-FAST-NEXT:    vpandq %ymm5, %ymm27, %ymm0
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm28, %zmm0
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm5
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm0, %zmm5
+; AVX512DQ-FAST-NEXT:    vpandq %ymm9, %ymm22, %ymm0
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm20, %zmm0
 ; AVX512DQ-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # zmm2 = mem[2,3,2,3,6,7,6,7]
 ; AVX512DQ-FAST-NEXT:    vporq %zmm2, %zmm0, %zmm0
-; AVX512DQ-FAST-NEXT:    vpandq %ymm31, %ymm24, %ymm2
+; AVX512DQ-FAST-NEXT:    vpandq %ymm26, %ymm19, %ymm2
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm2, %zmm2
-; AVX512DQ-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm3 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FAST-NEXT:    vporq %zmm3, %zmm2, %zmm2
+; AVX512DQ-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # zmm4 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FAST-NEXT:    vporq %zmm4, %zmm2, %zmm2
 ; AVX512DQ-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vpandq %ymm31, %ymm25, %ymm0
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm19, %zmm0
-; AVX512DQ-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm3 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FAST-NEXT:    vporq %zmm3, %zmm0, %zmm3
-; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
-; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm0, %zmm3
-; AVX512DQ-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm2 = mem[0,1,0,1,4,5,4,5]
-; AVX512DQ-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm8 = mem[0,1,0,1,4,5,4,5]
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm0, %zmm8
-; AVX512DQ-FAST-NEXT:    vpandq %ymm31, %ymm1, %ymm1
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm12, %zmm1
+; AVX512DQ-FAST-NEXT:    vpandq %ymm26, %ymm18, %ymm0
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm15, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # zmm4 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FAST-NEXT:    vporq %zmm4, %zmm0, %zmm0
+; AVX512DQ-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255]
+; AVX512DQ-FAST-NEXT:    vpternlogq $184, %zmm2, %zmm4, %zmm0
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} zmm2 = zmm28[0,1,0,1,4,5,4,5]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} zmm8 = zmm27[0,1,0,1,4,5,4,5]
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm4, %zmm8
+; AVX512DQ-FAST-NEXT:    vpandq %ymm26, %ymm13, %ymm2
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
 ; AVX512DQ-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # zmm2 = mem[2,3,2,3,6,7,6,7]
 ; AVX512DQ-FAST-NEXT:    vporq %zmm2, %zmm1, %zmm1
 ; AVX512DQ-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # zmm2 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm5 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FAST-NEXT:    vporq %zmm2, %zmm5, %zmm22
-; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm0, %zmm22
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
-; AVX512DQ-FAST-NEXT:    vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm26 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm26 = zmm0[0,1,0,1],mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm4[1,1,0,0,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,0,1,2,0,0,1]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm1, %ymm2, %ymm19
-; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm0[1,1,0,0,4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm5, %ymm2, %ymm17
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = [4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm10
-; AVX512DQ-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm5
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
-; AVX512DQ-FAST-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm29, %ymm0
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm0, %ymm13
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[25],zero,ymm0[23],zero,zero,zero,zero,ymm0[26],zero,ymm0[24],zero,zero
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm12, %ymm0, %ymm12
-; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
+; AVX512DQ-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # zmm6 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FAST-NEXT:    vporq %zmm2, %zmm6, %zmm9
+; AVX512DQ-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm4, %zmm9
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u]
+; AVX512DQ-FAST-NEXT:    vshufi64x2 $0, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm18 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # zmm18 = zmm1[0,1,0,1],mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %xmm29, %xmm3
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[1,1,0,0,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,0,1,2,0,0,1]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm2, %ymm4, %ymm19
+; AVX512DQ-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm1[1,1,0,0,4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm6, %ymm4, %ymm17
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm3, %xmm10
+; AVX512DQ-FAST-NEXT:    vpshufb %xmm6, %xmm1, %xmm6
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22]
+; AVX512DQ-FAST-NEXT:    # ymm11 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm12
+; AVX512DQ-FAST-NEXT:    vmovdqu (%rsp), %ymm1 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm1[25],zero,ymm1[23],zero,zero,zero,zero,ymm1[26],zero,ymm1[24],zero,zero
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm11
+; AVX512DQ-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm14 = [20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128,20,128,18,128,20,21,20,21,128,19,128,19,20,21,22,128]
+; AVX512DQ-FAST-NEXT:    # ymm14 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm25, %ymm1
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm1, %ymm15
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm2[23],zero,ymm2[23,24,25,26],zero,ymm2[24],zero,ymm2[30,31]
+; AVX512DQ-FAST-NEXT:    vpshufb %ymm14, %ymm2, %ymm14
+; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
+; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm4 = ymm3[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
+; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [4,5,4,5,5,7,4,5]
+; AVX512DQ-FAST-NEXT:    vpermd %ymm4, %ymm2, %ymm20
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
+; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
+; AVX512DQ-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # zmm22 = mem[2,3,2,3,6,7,6,7]
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm22
+; AVX512DQ-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %ymm30, %ymm1
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm1, %ymm2
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm9 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,ymm9[23],zero,ymm9[23,24,25,26],zero,ymm9[24],zero,ymm9[30,31]
-; AVX512DQ-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm0
-; AVX512DQ-FAST-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
-; AVX512DQ-FAST-NEXT:    vpshuflw {{.*#+}} ymm15 = ymm4[2,1,1,2,4,5,6,7,10,9,9,10,12,13,14,15]
-; AVX512DQ-FAST-NEXT:    vmovdqa {{.*#+}} ymm9 = [4,5,4,5,5,7,4,5]
-; AVX512DQ-FAST-NEXT:    vpermd %ymm15, %ymm9, %ymm20
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,26,27,28,29,30,31,30,31,28,29,28,29,30,31]
-; AVX512DQ-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,22,23,26,27,24,25,22,23,24,25,26,27,26,27,24,25]
-; AVX512DQ-FAST-NEXT:    vpermq $238, {{[-0-9]+}}(%r{{[sb]}}p), %zmm24 # 64-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # zmm24 = mem[2,3,2,3,6,7,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm24
-; AVX512DQ-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    # ymm25 = mem[0,1,0,1]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    # ymm23 = mem[0,1,0,1]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,1,0]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,1,0]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,0,1,0]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
 ; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm25 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm25
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm18
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm25, %zmm18
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm15, %zmm3
-; AVX512DQ-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm16
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm16
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm3 # 32-byte Folded Reload
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm3
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm3
-; AVX512DQ-FAST-NEXT:    vpor %ymm2, %ymm13, %ymm2
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm23 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm23
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm24
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm23, %zmm24
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm19, %zmm2, %zmm0
+; AVX512DQ-FAST-NEXT:    vpternlogq $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm21
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm21
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm10, %zmm0 # 32-byte Folded Reload
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm0
+; AVX512DQ-FAST-NEXT:    vpor %ymm12, %ymm15, %ymm2
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm2
-; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm4 # 64-byte Reload
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm4[0,1,2,3],zmm2[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm23
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm23
+; AVX512DQ-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm7 # 64-byte Reload
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm2 = zmm7[0,1,2,3],zmm2[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm16
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm16
 ; AVX512DQ-FAST-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Folded Reload
 ; AVX512DQ-FAST-NEXT:    # zmm2 = mem[0,1,0,1,4,5,4,5]
-; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} zmm6 = zmm11[0,1,0,1,4,5,4,5]
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm6
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm17, %zmm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm26, %zmm2
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm2
-; AVX512DQ-FAST-NEXT:    vpor %ymm1, %ymm14, %ymm1
-; AVX512DQ-FAST-NEXT:    vpor %ymm0, %ymm12, %ymm0
+; AVX512DQ-FAST-NEXT:    vpermq {{.*#+}} zmm5 = zmm31[0,1,0,1,4,5,4,5]
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm5
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm17, %zmm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm18, %zmm2
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm2
+; AVX512DQ-FAST-NEXT:    vpor %ymm1, %ymm13, %ymm1
+; AVX512DQ-FAST-NEXT:    vpor %ymm11, %ymm14, %ymm5
 ; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm1
-; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm1[4,5,6,7]
-; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm20, %zmm1
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm22, %zmm1
+; AVX512DQ-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm5[0,1,2,3],zmm1[4,5,6,7]
+; AVX512DQ-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm20, %zmm4
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm4
+; AVX512DQ-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm4
 ; AVX512DQ-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm1, 128(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm4, 128(%rax)
 ; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm2, (%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm23, 320(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm3, 256(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, 192(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm18, 64(%rax)
-; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, 384(%rax)
-; AVX512DQ-FAST-NEXT:    addq $1496, %rsp # imm = 0x5D8
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm16, 320(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm0, 256(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm21, 192(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm24, 64(%rax)
+; AVX512DQ-FAST-NEXT:    vmovdqa64 %zmm22, 384(%rax)
+; AVX512DQ-FAST-NEXT:    addq $1256, %rsp # imm = 0x4E8
 ; AVX512DQ-FAST-NEXT:    vzeroupper
 ; AVX512DQ-FAST-NEXT:    retq
 ;