[llvm] a7d8d11 - [X86] combineConcatVectorOps - constant fold vector load concatenation directly into a new load.

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 11 08:24:44 PST 2023


Author: Simon Pilgrim
Date: 2023-12-11T16:23:04Z
New Revision: a7d8d11a144fb09afb3fcbef62fe302ae2fef897

URL: https://github.com/llvm/llvm-project/commit/a7d8d11a144fb09afb3fcbef62fe302ae2fef897
DIFF: https://github.com/llvm/llvm-project/commit/a7d8d11a144fb09afb3fcbef62fe302ae2fef897.diff

LOG: [X86] combineConcatVectorOps - constant fold vector load concatenation directly into a new load.

Create the new constant pool entry directly instead of going via a BUILD_VECTOR node, as the intermediate BUILD_VECTOR makes constant pool reuse more difficult.

Helps with some regressions in #73509
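
As a rough illustration, the core of the new fold looks like this (a condensed sketch of the code added to combineConcatVectorOps below; the per-operand getTargetConstantBitsFromNode loop and element-count check are elided):

    // Every operand is a load of a target constant: gather the concatenated
    // element bits (tracking undef lanes), emit them as one wide constant-pool
    // entry, and load the whole VT-sized vector with a single load.
    SmallVector<APInt> EltBits;   // element values of all operands, in order
    APInt UndefElts = APInt::getZero(VT.getVectorNumElements());
    // ... gather per-operand bits into EltBits/UndefElts, as in the patch ...
    Constant *C = getConstantVector(VT, EltBits, UndefElts, Ctx);
    SDValue CV = DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
    SDValue Ld = DAG.getLoad(VT, DL, DAG.getEntryNode(), CV,
                             MachinePointerInfo::getConstantPool(
                                 DAG.getMachineFunction()));
    // Other users of the low operand are redirected to a subvector extract of
    // the new load, so the old, narrower constant-pool loads become dead.
    DAG.ReplaceAllUsesOfValueWith(
        Op0, extractSubVector(Ld, 0, DAG, DL, Op0.getValueSizeInBits()));
    return Ld;

Redirecting Op0's other users to the new load is what lets the wider constant-pool entry be shared, instead of keeping both the narrow and the concatenated constants alive in the pool.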

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/combine-concatvectors.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
    llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
    llvm/test/CodeGen/X86/vector-shuffle-v192.ll
    llvm/test/CodeGen/X86/x86-interleaved-access.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 22630f9a6b82f8..adafb425babf10 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -7040,6 +7040,31 @@ static SDValue combineToConsecutiveLoads(EVT VT, SDValue Op, const SDLoc &DL,
                                   IsAfterLegalize);
 }
 
+static Constant *getConstantVector(MVT VT, ArrayRef<APInt> Bits,
+                                   const APInt &Undefs, LLVMContext &C) {
+  unsigned ScalarSize = VT.getScalarSizeInBits();
+  Type *Ty = EVT(VT.getScalarType()).getTypeForEVT(C);
+
+  auto getConstantScalar = [&](const APInt &Val) -> Constant * {
+    if (VT.isFloatingPoint()) {
+      if (ScalarSize == 16)
+        return ConstantFP::get(C, APFloat(APFloat::IEEEhalf(), Val));
+      if (ScalarSize == 32)
+        return ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
+      assert(ScalarSize == 64 && "Unsupported floating point scalar size");
+      return ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
+    }
+    return Constant::getIntegerValue(Ty, Val);
+  };
+
+  SmallVector<Constant *, 32> ConstantVec;
+  for (unsigned I = 0, E = Bits.size(); I != E; ++I)
+    ConstantVec.push_back(Undefs[I] ? UndefValue::get(Ty)
+                                    : getConstantScalar(Bits[I]));
+
+  return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
+}
+
 static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
                                    unsigned SplatBitSize, LLVMContext &C) {
   unsigned ScalarSize = VT.getScalarSizeInBits();
@@ -54978,6 +55003,32 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
     }
   }
 
+  // Attempt to fold target constant loads.
+  if (all_of(Ops, [](SDValue Op) { return getTargetConstantFromNode(Op); })) {
+    SmallVector<APInt> EltBits;
+    APInt UndefElts = APInt::getZero(VT.getVectorNumElements());
+    for (unsigned I = 0; I != NumOps; ++I) {
+      APInt OpUndefElts;
+      SmallVector<APInt> OpEltBits;
+      if (!getTargetConstantBitsFromNode(Ops[I], EltSizeInBits, OpUndefElts,
+                                         OpEltBits, true, false))
+        break;
+      EltBits.append(OpEltBits);
+      UndefElts.insertBits(OpUndefElts, I * OpUndefElts.getBitWidth());
+    }
+    if (EltBits.size() == VT.getVectorNumElements()) {
+      Constant *C = getConstantVector(VT, EltBits, UndefElts, Ctx);
+      MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
+      SDValue CV = DAG.getConstantPool(C, PVT);
+      MachineFunction &MF = DAG.getMachineFunction();
+      MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
+      SDValue Ld = DAG.getLoad(VT, DL, DAG.getEntryNode(), CV, MPI);
+      SDValue Sub = extractSubVector(Ld, 0, DAG, DL, Op0.getValueSizeInBits());
+      DAG.ReplaceAllUsesOfValueWith(Op0, Sub);
+      return Ld;
+    }
+  }
+
   // If this simple subvector or scalar/subvector broadcast_load is inserted
   // into both halves, use a larger broadcast_load. Update other uses to use
   // an extracted subvector.
@@ -55000,23 +55051,6 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
     }
   }
 
-  // Attempt to fold target constant loads.
-  if (all_of(Ops, [](SDValue Op) { return getTargetConstantFromNode(Op); })) {
-    SmallVector<APInt> EltBits;
-    APInt UndefElts = APInt::getZero(VT.getVectorNumElements());
-    for (unsigned I = 0; I != NumOps; ++I) {
-      APInt OpUndefElts;
-      SmallVector<APInt> OpEltBits;
-      if (!getTargetConstantBitsFromNode(Ops[I], EltSizeInBits, OpUndefElts,
-                                        OpEltBits, true, false))
-          break;
-      EltBits.append(OpEltBits);
-      UndefElts.insertBits(OpUndefElts, I * OpUndefElts.getBitWidth());
-    }
-    if (EltBits.size() == VT.getVectorNumElements())
-      return getConstVector(EltBits, UndefElts, VT, DAG, DL);
-  }
-
   // If we're splatting a 128-bit subvector to 512-bits, use SHUF128 directly.
   if (IsSplat && NumOps == 4 && VT.is512BitVector() &&
       Subtarget.useAVX512Regs()) {

diff --git a/llvm/test/CodeGen/X86/combine-concatvectors.ll b/llvm/test/CodeGen/X86/combine-concatvectors.ll
index a8c3fb589bbfe9..8a0e39c57b787a 100644
--- a/llvm/test/CodeGen/X86/combine-concatvectors.ll
+++ b/llvm/test/CodeGen/X86/combine-concatvectors.ll
@@ -48,8 +48,7 @@ define void @concat_of_broadcast_v2f64_v4f64() {
 ; AVX1-NEXT:    movl $1091567616, 30256(%rax) # imm = 0x41100000
 ; AVX1-NEXT:    movabsq $4294967297, %rcx # imm = 0x100000001
 ; AVX1-NEXT:    movq %rcx, 46348(%rax)
-; AVX1-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = [7.812501848093234E-3,7.812501848093234E-3,7.812501848093234E-3,7.812501848093234E-3]
-; AVX1-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX1-NEXT:    vbroadcastss {{.*#+}} ymm0 = [1065353216,1065353216,1065353216,1065353216,1065353216,1065353216,1065353216,1065353216]
 ; AVX1-NEXT:    vmovups %ymm0, 48296(%rax)
 ; AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; AVX1-NEXT:    vmovsd %xmm0, 47372(%rax)

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
index 3dc5818d6c9b7f..16b5e8db76ef13 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-3.ll
@@ -652,8 +652,7 @@ define void @store_i16_stride3_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
 ; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,0,0,u,1,1,u,2>
 ; AVX512F-NEXT:    vpermd %ymm0, %ymm2, %ymm2
-; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-NEXT:    vpandn %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm3 = zero,zero,ymm0[10,11],zero,zero,zero,zero,ymm0[12,13],zero,zero,zero,zero,ymm0[14,15],zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,ymm0[18,19],zero,zero,zero,zero
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
 ; AVX512F-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm2

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
index 59866f2b8577be..413cd7d63d8c67 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
@@ -2776,7 +2776,7 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm13 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdx), %ymm17
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm5
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm6
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm17[3,2,3,3,7,6,7,7]
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm1
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm1[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
@@ -2793,8 +2793,8 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,5,6]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 (%rdi), %ymm20
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm20[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm6
-; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm6[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm5
+; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm5[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[2,3,2,2,6,7,6,6]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm7[0],ymm3[1],ymm7[2],ymm3[3],ymm7[4,5],ymm3[6],ymm7[7,8],ymm3[9],ymm7[10],ymm3[11],ymm7[12,13],ymm3[14],ymm7[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,2]
@@ -2822,7 +2822,7 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm4 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm4, %ymm0, %ymm2
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm4, %ymm23
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm5[3,0,3,0,7,4,7,4]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm6[3,0,3,0,7,4,7,4]
 ; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm8[0],ymm2[1],ymm8[2],ymm2[3],ymm8[4,5],ymm2[6],ymm8[7,8],ymm2[9],ymm8[10],ymm2[11],ymm8[12,13],ymm2[14],ymm8[15]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm10, %xmm10
@@ -2831,15 +2831,15 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,0]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm9, %zmm2
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm11, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm10
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm9
+; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm9
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%r8), %ymm10
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm11 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm9, %ymm4
+; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm10, %ymm4
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vpandnq %ymm9, %ymm21, %ymm9
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm9, %zmm9
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm21, %zmm2, %zmm9
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,1,1,1]
+; AVX512F-SLOW-NEXT:    vpandnq %ymm10, %ymm21, %ymm10
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm10, %zmm10
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm21, %zmm2, %zmm10
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm2
 ; AVX512F-SLOW-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm12[0],xmm2[0],xmm12[1],xmm2[1],xmm12[2],xmm2[2],xmm12[3],xmm2[3]
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm4, %xmm4
@@ -2860,43 +2860,44 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm7, %zmm4
 ; AVX512F-SLOW-NEXT:    vpbroadcastq (%r8), %ymm2
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm10[0,1,1,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm2, %zmm2
-; AVX512F-SLOW-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm2
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm18[0,1,2,1,4,5,6,5]
-; AVX512F-SLOW-NEXT:    vprolq $16, %ymm3, %ymm8
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm8[0,1],ymm4[2],ymm8[3],ymm4[4],ymm8[5,6],ymm4[7],ymm8[8,9],ymm4[10],ymm8[11],ymm4[12],ymm8[13,14],ymm4[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm18[2,3,2,3,6,7,6,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm9[0,1,1,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm2, %zmm12
+; AVX512F-SLOW-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm12
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[0,1,2,1,4,5,6,5]
+; AVX512F-SLOW-NEXT:    vprolq $16, %ymm3, %ymm4
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0,1],ymm2[2],ymm4[3],ymm2[4],ymm4[5,6],ymm2[7],ymm4[8,9],ymm2[10],ymm4[11],ymm2[12],ymm4[13,14],ymm2[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm4 = ymm18[2,3,2,3,6,7,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,2,6,7,6,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm8[1],ymm3[2],ymm8[3],ymm3[4,5],ymm8[6],ymm3[7,8],ymm8[9],ymm3[10],ymm8[11],ymm3[12,13],ymm8[14],ymm3[15]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8],ymm4[9],ymm3[10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,2]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm4
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
 ; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25,18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25]
 ; AVX512F-SLOW-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm0, %ymm8
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm12 = ymm5[1,1,1,2,5,5,5,6]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm12[1],ymm8[2,3],ymm12[4],ymm8[5],ymm12[6],ymm8[7,8],ymm12[9],ymm8[10,11],ymm12[12],ymm8[13],ymm12[14],ymm8[15]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[3,2,3,3,7,6,7,7]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm3, %ymm0, %ymm4
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm8 = ymm6[1,1,1,2,5,5,5,6]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm8[1],ymm4[2,3],ymm8[4],ymm4[5],ymm8[6],ymm4[7,8],ymm8[9],ymm4[10,11],ymm8[12],ymm4[13],ymm8[14],ymm4[15]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[3,2,3,3,7,6,7,7]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2],ymm5[3,4],ymm0[5,6,7,8],ymm5[9],ymm0[10],ymm5[11,12],ymm0[13,14,15]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm6[1],ymm0[2],ymm6[3,4],ymm0[5,6,7,8],ymm6[9],ymm0[10],ymm6[11,12],ymm0[13,14,15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,3,2]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm4, %zmm7, %zmm0
-; AVX512F-SLOW-NEXT:    vpbroadcastq 48(%r8), %ymm4
-; AVX512F-SLOW-NEXT:    vpbroadcastq 56(%r8), %ymm5
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512F-SLOW-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm4
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm0
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm7, %zmm0
+; AVX512F-SLOW-NEXT:    vpbroadcastq 48(%r8), %ymm2
+; AVX512F-SLOW-NEXT:    vpbroadcastq 56(%r8), %ymm4
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
+; AVX512F-SLOW-NEXT:    vpternlogd $184, %zmm0, %zmm4, %zmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm22, %ymm0
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm0
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm20[1,1,2,2]
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2,3],ymm0[4],ymm5[5],ymm0[6],ymm5[7,8],ymm0[9],ymm5[10,11],ymm0[12],ymm5[13],ymm0[14],ymm5[15]
-; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm5 = ymm20[0,1,2,1,4,5,6,5]
-; AVX512F-SLOW-NEXT:    vprolq $16, %ymm6, %ymm6
-; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0,1],ymm5[2],ymm6[3],ymm5[4],ymm6[5,6],ymm5[7],ymm6[8,9],ymm5[10],ymm6[11],ymm5[12],ymm6[13,14],ymm5[15]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm20[1,1,2,2]
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0],ymm0[1],ymm6[2,3],ymm0[4],ymm6[5],ymm0[6],ymm6[7,8],ymm0[9],ymm6[10,11],ymm0[12],ymm6[13],ymm0[14],ymm6[15]
+; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm6 = ymm20[0,1,2,1,4,5,6,5]
+; AVX512F-SLOW-NEXT:    vprolq $16, %ymm5, %ymm5
+; AVX512F-SLOW-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm6[2],ymm5[3],ymm6[4],ymm5[5,6],ymm6[7],ymm5[8,9],ymm6[10],ymm5[11],ymm6[12],ymm5[13,14],ymm6[15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %ymm23, %ymm5
@@ -2909,15 +2910,15 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
 ; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm0, %zmm16, %zmm1
-; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm10, %ymm0
-; AVX512F-SLOW-NEXT:    vpbroadcastq 16(%r8), %ymm3
-; AVX512F-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpbroadcastq 16(%r8), %ymm0
+; AVX512F-SLOW-NEXT:    vpandn %ymm0, %ymm4, %ymm0
+; AVX512F-SLOW-NEXT:    vpshufb %ymm11, %ymm9, %ymm3
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
 ; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, 64(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm4, 256(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, (%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm9, 192(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm2, 256(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm12, (%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm10, 192(%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm19, 128(%r9)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
@@ -2927,7 +2928,7 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm5
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u>
 ; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm23
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm1, %ymm17
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %ymm7
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm7[1,1,2,2]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
@@ -2935,29 +2936,29 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm2
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm19
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm20
 ; AVX512F-FAST-NEXT:    vpbroadcastq 40(%rdi), %xmm2
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm4
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm1
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm8
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm8, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm2, %ymm24
-; AVX512F-FAST-NEXT:    vmovdqa64 (%rdx), %ymm17
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u>
+; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm8, %ymm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm24
+; AVX512F-FAST-NEXT:    vmovdqa64 (%rdx), %ymm18
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %ymm9
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm9[3,0,3,0,7,4,7,4]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm9[3,0,3,0,7,4,7,4]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4,5],ymm2[6],ymm4[7,8],ymm2[9],ymm4[10],ymm2[11],ymm4[12,13],ymm2[14],ymm4[15]
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %xmm11
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm15 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm11, %xmm2
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm4 = <10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9>
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm11, %xmm13
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm6
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm13 = xmm6[1,2,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm13[1],xmm2[2],xmm13[3],xmm2[4,5],xmm13[6],xmm2[7]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,0]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm15 = xmm6[1,2,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0],xmm15[1],xmm13[2],xmm15[3],xmm13[4,5],xmm15[6],xmm13[7]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,0]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm13, %zmm15
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm15
 ; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm2
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
@@ -2966,64 +2967,65 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,1,1]
 ; AVX512F-FAST-NEXT:    vpandnq %ymm2, %ymm16, %ymm2
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm18
-; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm16, %zmm1, %zmm18
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm15, %xmm1, %xmm3
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm19
+; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm16, %zmm15, %zmm19
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm3
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm3, %xmm4
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm15
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} xmm14 = xmm15[1,2,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm14[1],xmm3[2],xmm14[3],xmm3[4,5],xmm14[6],xmm3[7]
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm14[1],xmm4[2],xmm14[3],xmm4[4,5],xmm14[6],xmm4[7]
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm20
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm21
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [0,1,0,1,8,9,8,8]
-; AVX512F-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm14
-; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm1
-; AVX512F-FAST-NEXT:    vpbroadcastq 8(%rdi), %xmm3
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5],xmm3[6],xmm1[7]
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 32(%rdi), %xmm21
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1],xmm3[2],xmm10[2],xmm3[3],xmm10[3]
+; AVX512F-FAST-NEXT:    vpermi2q %zmm4, %zmm3, %zmm14
+; AVX512F-FAST-NEXT:    vpshufb %xmm12, %xmm10, %xmm3
+; AVX512F-FAST-NEXT:    vpbroadcastq 8(%rdi), %xmm4
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5],xmm4[6],xmm3[7]
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa64 32(%rdi), %xmm22
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
-; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm22
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
+; AVX512F-FAST-NEXT:    vpshufb %xmm2, %xmm4, %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm23
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[0,1,0,1,4,5,4,5]
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm14, %zmm16, %zmm1
-; AVX512F-FAST-NEXT:    vpbroadcastq (%r8), %ymm3
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm14, %zmm16, %zmm3
+; AVX512F-FAST-NEXT:    vpbroadcastq (%r8), %ymm4
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm0[0,1,1,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm3, %zmm10
-; AVX512F-FAST-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm10
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm4, %zmm10
+; AVX512F-FAST-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm10
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm15
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm23, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm15, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm15, %ymm3
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm14
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm14[1,1,2,2]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5],ymm1[6],ymm3[7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13],ymm1[14],ymm3[15]
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[0,1,2,1,4,5,6,5]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm14[1,1,2,2]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5],ymm3[6],ymm4[7,8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13],ymm3[14],ymm4[15]
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm14[0,1,2,1,4,5,6,5]
 ; AVX512F-FAST-NEXT:    vprolq $16, %ymm15, %ymm13
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm13[0,1],ymm3[2],ymm13[3],ymm3[4],ymm13[5,6],ymm3[7],ymm13[8,9],ymm3[10],ymm13[11],ymm3[12],ymm13[13,14],ymm3[15]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm24, %ymm1
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm1
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm17[3,0,3,0,7,4,7,4]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm1[1],ymm13[2],ymm1[3],ymm13[4,5],ymm1[6],ymm13[7,8],ymm1[9],ymm13[10],ymm1[11],ymm13[12,13],ymm1[14],ymm13[15]
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25,18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25]
-; AVX512F-FAST-NEXT:    # ymm1 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm4, %ymm2
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm17[1,1,1,2,5,5,5,6]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm13[0,1],ymm4[2],ymm13[3],ymm4[4],ymm13[5,6],ymm4[7],ymm13[8,9],ymm4[10],ymm13[11],ymm4[12],ymm13[13,14],ymm4[15]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm24, %ymm2
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm4
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm13 = ymm18[3,0,3,0,7,4,7,4]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm4[1],ymm13[2],ymm4[3],ymm13[4,5],ymm4[6],ymm13[7,8],ymm4[9],ymm13[10],ymm4[11],ymm13[12,13],ymm4[14],ymm13[15]
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25,18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25]
+; AVX512F-FAST-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm1, %ymm2
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm12 = ymm18[1,1,1,2,5,5,5,6]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm12[1],ymm2[2,3],ymm12[4],ymm2[5],ymm12[6],ymm2[7,8],ymm12[9],ymm2[10,11],ymm12[12],ymm2[13],ymm12[14],ymm2[15]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm13, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm12, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm25, %ymm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm17, %zmm2
 ; AVX512F-FAST-NEXT:    vpbroadcastq 16(%r8), %ymm3
-; AVX512F-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
+; AVX512F-FAST-NEXT:    vpandn %ymm3, %ymm12, %ymm3
+; AVX512F-FAST-NEXT:    vmovdqa64 %ymm25, %ymm13
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm0, %ymm0
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
 ; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29]
@@ -3040,44 +3042,44 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm8, %ymm3
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm9[3,2,3,3,7,6,7,7]
 ; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm7[1],ymm3[2],ymm7[3,4],ymm3[5,6,7,8],ymm7[9],ymm3[10],ymm7[11,12],ymm3[13,14,15]
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm8, %ymm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm4, %ymm8, %ymm4
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm7 = ymm9[1,1,1,2,5,5,5,6]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm7[1],ymm1[2,3],ymm7[4],ymm1[5],ymm7[6],ymm1[7,8],ymm7[9],ymm1[10,11],ymm7[12],ymm1[13],ymm7[14],ymm1[15]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm7[1],ymm4[2,3],ymm7[4],ymm4[5],ymm7[6],ymm4[7,8],ymm7[9],ymm4[10,11],ymm7[12],ymm4[13],ymm7[14],ymm4[15]
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [2,3,2,3,10,10,11,10]
-; AVX512F-FAST-NEXT:    vpermi2q %zmm3, %zmm1, %zmm7
+; AVX512F-FAST-NEXT:    vpermi2q %zmm3, %zmm4, %zmm7
 ; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm5, %zmm16, %zmm7
-; AVX512F-FAST-NEXT:    vpbroadcastq 48(%r8), %ymm1
-; AVX512F-FAST-NEXT:    vpbroadcastq 56(%r8), %ymm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512F-FAST-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm7, %zmm1
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm11[0],xmm6[0],xmm11[1],xmm6[1],xmm11[2],xmm6[2],xmm11[3],xmm6[3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm20, %xmm5
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
-; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm4, %ymm2
-; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm4 = ymm17[3,2,3,3,7,6,7,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2],ymm4[3,4],ymm2[5,6,7,8],ymm4[9],ymm2[10],ymm4[11,12],ymm2[13,14,15]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [2,2,3,2,8,9,8,9]
-; AVX512F-FAST-NEXT:    vpermi2q %zmm3, %zmm2, %zmm4
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm19, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm21, %xmm3
-; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm22, %xmm3
-; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm15, %ymm3
+; AVX512F-FAST-NEXT:    vpbroadcastq 48(%r8), %ymm3
+; AVX512F-FAST-NEXT:    vpbroadcastq 56(%r8), %ymm4
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512F-FAST-NEXT:    vpternlogd $184, %zmm7, %zmm12, %zmm3
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm11[0],xmm6[0],xmm11[1],xmm6[1],xmm11[2],xmm6[2],xmm11[3],xmm6[3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm21, %xmm5
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
+; AVX512F-FAST-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[3,2,3,3,7,6,7,7]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3,4],ymm1[5,6,7,8],ymm2[9],ymm1[10],ymm2[11,12],ymm1[13,14,15]
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [2,2,3,2,8,9,8,9]
+; AVX512F-FAST-NEXT:    vpermi2q %zmm4, %zmm1, %zmm2
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm20, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm22, %xmm4
+; AVX512F-FAST-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm23, %xmm4
+; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
+; AVX512F-FAST-NEXT:    vpshufb %ymm13, %ymm15, %ymm4
 ; AVX512F-FAST-NEXT:    vpshufd {{.*#+}} ymm5 = ymm14[2,3,2,3,6,7,6,7]
-; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2],ymm5[3],ymm3[4,5],ymm5[6],ymm3[7,8],ymm5[9],ymm3[10],ymm5[11],ymm3[12,13],ymm5[14],ymm3[15]
+; AVX512F-FAST-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7,8],ymm5[9],ymm4[10],ymm5[11],ymm4[12,13],ymm5[14],ymm4[15]
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [2,3,2,2,8,9,8,9]
-; AVX512F-FAST-NEXT:    vpermi2q %zmm2, %zmm3, %zmm5
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm4, %zmm12, %zmm5
-; AVX512F-FAST-NEXT:    vpbroadcastq 24(%r8), %ymm2
-; AVX512F-FAST-NEXT:    vpbroadcastq 32(%r8), %ymm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512F-FAST-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm2, 128(%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 256(%r9)
+; AVX512F-FAST-NEXT:    vpermi2q %zmm1, %zmm4, %zmm5
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm2, %zmm17, %zmm5
+; AVX512F-FAST-NEXT:    vpbroadcastq 24(%r8), %ymm1
+; AVX512F-FAST-NEXT:    vpbroadcastq 32(%r8), %ymm2
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-FAST-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 128(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, 256(%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, 64(%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm10, (%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm18, 192(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm19, 192(%r9)
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
index 3de19185ea0539..0bb8e17e0ee484 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-7.ll
@@ -2341,8 +2341,7 @@ define void @store_i16_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm2[u,u,u,u,u,u,u,u,u,u,14,15],zero,zero,ymm2[u,u,u,u,u,u,u,u,u,u,16,17],zero,zero,ymm2[u,u,u,u]
 ; AVX512F-SLOW-NEXT:    vporq %ymm5, %ymm10, %ymm19
 ; AVX512F-SLOW-NEXT:    vpbroadcastd 8(%rax), %ymm5
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm10 = [65535,65535,0,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535]
-; AVX512F-SLOW-NEXT:    vpandn %ymm5, %ymm10, %ymm5
+; AVX512F-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm13[12,13,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm13[14,15,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm13[16,17,u,u]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm5, %zmm5
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm10 = ymm6[0,1,1,3,4,5,5,7]

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
index 8ccb4ef56d9c74..f727622682cf23 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
@@ -567,8 +567,8 @@ define void @store_i8_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm3
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
-; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
+; AVX512BW-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
 ; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
 ; AVX512BW-NEXT:    vpshufb %zmm2, %zmm1, %zmm1
@@ -1086,8 +1086,8 @@ define void @store_i8_stride3_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512BW-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
 ; AVX512BW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm2
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
-; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512BW-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
+; AVX512BW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    vpshufb %zmm3, %zmm2, %zmm2
 ; AVX512BW-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm4
 ; AVX512BW-NEXT:    vpshufb %zmm3, %zmm4, %zmm4

diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
index 33f4e4b0f6d0e4..4174fde817472a 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-5.ll
@@ -3890,36 +3890,37 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm15 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm15, %ymm3, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm7 = <12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19>
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm2, %ymm1
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = <12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19>
+; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm2, %ymm1
 ; AVX512F-SLOW-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm8 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm1, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm30
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm1, %xmm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm16
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rsi), %xmm4
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm4, %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm4, %xmm31
 ; AVX512F-SLOW-NEXT:    vpor %xmm0, %xmm1, %xmm0
 ; AVX512F-SLOW-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm9
+; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %ymm8
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm0 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm9, %ymm4
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm8, %ymm4
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %ymm11
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm1 = <u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u>
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm11, %ymm10
-; AVX512F-SLOW-NEXT:    vporq %ymm4, %ymm10, %ymm18
+; AVX512F-SLOW-NEXT:    vpor %ymm4, %ymm10, %ymm4
+; AVX512F-SLOW-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rcx), %xmm13
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm13, %xmm4
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm25
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm13, %xmm4
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm25
 ; AVX512F-SLOW-NEXT:    vmovdqa 32(%rdx), %xmm10
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm5 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
-; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm10, %xmm12
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm5, %xmm26
-; AVX512F-SLOW-NEXT:    vporq %xmm4, %xmm12, %xmm19
+; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm6 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
+; AVX512F-SLOW-NEXT:    vpshufb %xmm6, %xmm10, %xmm12
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm6, %xmm26
+; AVX512F-SLOW-NEXT:    vporq %xmm4, %xmm12, %xmm20
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm4 = ymm3[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm3[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm22
@@ -3927,31 +3928,31 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero,ymm2[25],zero,zero
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm23
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[27],zero,zero,ymm11[26],zero,ymm11[28],zero,ymm11[30],zero,zero,ymm11[29],zero,ymm11[31],zero,zero
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm9[19],zero,ymm9[21],zero,zero,ymm9[20],zero,ymm9[22],zero,ymm9[24],zero,zero,ymm9[23],zero
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm8[19],zero,ymm8[21],zero,zero,ymm8[20],zero,ymm8[22],zero,ymm8[24],zero,zero,ymm8[23],zero
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm24
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm12
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm12, %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm6
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm6, %ymm1
-; AVX512F-SLOW-NEXT:    vporq %ymm0, %ymm1, %ymm20
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm5
-; AVX512F-SLOW-NEXT:    vpshufb %ymm15, %ymm5, %ymm2
+; AVX512F-SLOW-NEXT:    vporq %ymm0, %ymm1, %ymm19
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm7
+; AVX512F-SLOW-NEXT:    vpshufb %ymm15, %ymm7, %ymm2
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm4
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm4, %ymm3
+; AVX512F-SLOW-NEXT:    vpshufb %ymm9, %ymm4, %ymm3
 ; AVX512F-SLOW-NEXT:    vporq %ymm2, %ymm3, %ymm21
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX512F-SLOW-NEXT:    vpshufb %xmm8, %xmm0, %xmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm16
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm7
-; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm7, %xmm3
-; AVX512F-SLOW-NEXT:    vporq %xmm2, %xmm3, %xmm27
+; AVX512F-SLOW-NEXT:    vpshufb %xmm5, %xmm0, %xmm3
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm0, %xmm17
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %xmm5
+; AVX512F-SLOW-NEXT:    vpshufb %xmm14, %xmm5, %xmm9
+; AVX512F-SLOW-NEXT:    vporq %xmm3, %xmm9, %xmm27
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm25, %xmm0
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm17
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm1, %xmm18
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %xmm9
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm26, %xmm1
-; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm8, %xmm15
+; AVX512F-SLOW-NEXT:    vpshufb %xmm1, %xmm9, %xmm15
 ; AVX512F-SLOW-NEXT:    vporq %xmm0, %xmm15, %xmm29
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm0 = mem[1,1,2,2]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,1,1]
@@ -3965,10 +3966,11 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm1, %ymm0, %ymm1
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,1,4,6,5,5]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,3,2]
-; AVX512F-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
+; AVX512F-SLOW-NEXT:    vpandnq %ymm0, %ymm30, %ymm0
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm25
 ; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm9, %ymm9
+; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm8, %ymm8
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm14 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm12[19],zero,ymm12[21],zero,zero,ymm12[20],zero,ymm12[22],zero,ymm12[24],zero,zero,ymm12[23],zero
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm12, %ymm12
 ; AVX512F-SLOW-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
@@ -3977,24 +3979,25 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm0, %ymm6, %ymm2
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,ymm6[26],zero,ymm6[28],zero,ymm6[30],zero,zero,ymm6[29],zero,ymm6[31],zero,zero
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3],xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = ymm5[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm5 = ymm5[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm13 = ymm7[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm3 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,ymm4[27],zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30],zero
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm30, %xmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm1
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm31, %xmm4
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <4,u,5,5,5,5,u,6,6,6,6,u,7,7,7,7>
-; AVX512F-SLOW-NEXT:    vpermd %zmm15, %zmm1, %zmm30
-; AVX512F-SLOW-NEXT:    vmovdqa64 (%r8), %zmm31
+; AVX512F-SLOW-NEXT:    vpermd %zmm15, %zmm1, %zmm31
+; AVX512F-SLOW-NEXT:    vmovdqa64 (%r8), %zmm16
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <6,6,6,u,7,7,7,7,u,16,16,16,16,u,17,17>
-; AVX512F-SLOW-NEXT:    vpermi2d %zmm15, %zmm31, %zmm1
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm16, %xmm15
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm15[0],xmm7[0],xmm15[1],xmm7[1],xmm15[2],xmm7[2],xmm15[3],xmm7[3],xmm15[4],xmm7[4],xmm15[5],xmm7[5],xmm15[6],xmm7[6],xmm15[7],xmm7[7]
+; AVX512F-SLOW-NEXT:    vpermi2d %zmm15, %zmm16, %zmm1
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm15
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm15[0],xmm5[0],xmm15[1],xmm5[1],xmm15[2],xmm5[2],xmm15[3],xmm5[3],xmm15[4],xmm5[4],xmm15[5],xmm5[5],xmm15[6],xmm5[6],xmm15[7],xmm5[7]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm4, %xmm4
-; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm5, %xmm5
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm27, %zmm5, %zmm5
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
@@ -4002,65 +4005,64 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm27, %zmm7, %zmm27
+; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm18, %xmm15
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm15[0],xmm9[0],xmm15[1],xmm9[1],xmm15[2],xmm9[2],xmm15[3],xmm9[3],xmm15[4],xmm9[4],xmm15[5],xmm9[5],xmm15[6],xmm9[6],xmm15[7],xmm9[7]
 ; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} xmm15 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm10, %xmm10
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vmovdqa64 %xmm17, %xmm7
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm8, %xmm8
-; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm29, %zmm8, %zmm8
+; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm9, %xmm9
+; AVX512F-SLOW-NEXT:    vinserti32x4 $2, %xmm29, %zmm9, %zmm9
 ; AVX512F-SLOW-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload
 ; AVX512F-SLOW-NEXT:    # ymm15 = mem[0,0,1,1]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm15 # 32-byte Folded Reload
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm16 = ymm19[0,0,1,1]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm18, %zmm16, %zmm16
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm15, %zmm17, %zmm16
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm28, %zmm16, %zmm26
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm17 = ymm20[0,0,1,1]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm17 # 32-byte Folded Reload
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm15, %zmm18, %zmm17
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm28, %zmm17, %zmm26
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm15 = zmm22[2,2,3,3,6,6,7,7]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm17 = zmm23[2,2,3,3,6,6,7,7]
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400,72056498804555775,72056498804555775,18374967950370078975,18374967950370078975]
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm20, %zmm15, %zmm17
+; AVX512F-SLOW-NEXT:    vpandq %ymm20, %ymm8, %ymm8
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm8, %zmm11, %zmm8
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm11 = zmm24[2,2,3,3,6,6,7,7]
+; AVX512F-SLOW-NEXT:    vporq %zmm11, %zmm8, %zmm8
+; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm17, %zmm11, %zmm8
+; AVX512F-SLOW-NEXT:    vpternlogd $184, %zmm8, %zmm30, %zmm31
 ; AVX512F-SLOW-NEXT:    vpor %ymm2, %ymm14, %ymm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm20, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400,72056498804555775,72056498804555775,18374967950370078975,18374967950370078975]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm14, %ymm13, %ymm0
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm19, %zmm2
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm20, %ymm13, %ymm0
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm21, %zmm0
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm17, %zmm0
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm13 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %zmm14, %zmm2, %zmm13
-; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm14, %ymm12, %ymm6
-; AVX512F-SLOW-NEXT:    vpand %ymm14, %ymm9, %ymm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm11, %zmm2
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm9 = zmm24[2,2,3,3,6,6,7,7]
-; AVX512F-SLOW-NEXT:    vporq %zmm9, %zmm2, %zmm2
-; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm9 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm13, %zmm9, %zmm2
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm6, %zmm6
-; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm3
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm18, %zmm0
+; AVX512F-SLOW-NEXT:    vpternlogq $248, %ymm20, %ymm12, %ymm6
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm6, %zmm2
+; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm3
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm6, %zmm9, %zmm3
-; AVX512F-SLOW-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm30
+; AVX512F-SLOW-NEXT:    vpternlogq $226, %zmm2, %zmm11, %zmm3
 ; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm25
 ; AVX512F-SLOW-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm1
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm27[0,0,1,1,4,4,5,5]
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm8[0,0,1,1,4,4,5,5]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm5[0,0,1,1,4,4,5,5]
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm9[0,0,1,1,4,4,5,5]
 ; AVX512F-SLOW-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
 ; AVX512F-SLOW-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <u,0,0,0,0,u,1,1,1,1,u,2,2,2,2,u>
-; AVX512F-SLOW-NEXT:    vpermd %zmm31, %zmm0, %zmm0
+; AVX512F-SLOW-NEXT:    vpermd %zmm16, %zmm0, %zmm0
 ; AVX512F-SLOW-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm25, 64(%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, (%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm1, 128(%r9)
-; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm30, 256(%r9)
+; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm31, 256(%r9)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm26, 192(%r9)
 ; AVX512F-SLOW-NEXT:    vzeroupper
 ; AVX512F-SLOW-NEXT:    retq
 ;
 ; AVX512F-FAST-LABEL: store_i8_stride5_vf64:
 ; AVX512F-FAST:       # %bb.0:
-; AVX512F-FAST-NEXT:    pushq %rax
+; AVX512F-FAST-NEXT:    subq $24, %rsp
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %ymm3
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm8 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm3, %ymm0
@@ -4069,14 +4071,14 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm1, %ymm2
 ; AVX512F-FAST-NEXT:    vpor %ymm0, %ymm2, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
-; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm2, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm2, %xmm19
+; AVX512F-FAST-NEXT:    vmovdqa 32(%rdi), %xmm0
+; AVX512F-FAST-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rsi), %xmm4
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm13 = <128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm4, %xmm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm18
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm19
 ; AVX512F-FAST-NEXT:    vpor %xmm0, %xmm2, %xmm0
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rcx), %ymm7
@@ -4092,9 +4094,9 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpshufb %xmm4, %xmm12, %xmm2
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm4, %xmm25
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%rdx), %xmm10
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
-; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm10, %xmm4
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm5, %xmm26
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
+; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm10, %xmm4
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm6, %xmm26
 ; AVX512F-FAST-NEXT:    vpor %xmm2, %xmm4, %xmm2
 ; AVX512F-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm3[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
@@ -4104,140 +4106,139 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm22
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm9[27],zero,zero,ymm9[26],zero,ymm9[28],zero,ymm9[30],zero,zero,ymm9[29],zero,ymm9[31],zero,zero
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128]
-; AVX512F-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm3, %ymm7, %ymm2
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm3, %ymm31
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm7[19],zero,ymm7[21],zero,zero,ymm7[20],zero,ymm7[22],zero,ymm7[24],zero,zero,ymm7[23],zero
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm23
-; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm5
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
+; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm4
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %ymm14
 ; AVX512F-FAST-NEXT:    vpshufb %ymm15, %ymm14, %ymm1
 ; AVX512F-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm24
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %ymm15
 ; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm15, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm4
-; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm4, %ymm1
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %ymm6
+; AVX512F-FAST-NEXT:    vpshufb %ymm11, %ymm6, %ymm1
 ; AVX512F-FAST-NEXT:    vporq %ymm0, %ymm1, %ymm20
-; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm6, %xmm1, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm16
+; AVX512F-FAST-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm5, %xmm0, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm0, %xmm16
 ; AVX512F-FAST-NEXT:    vmovdqa (%rsi), %xmm3
 ; AVX512F-FAST-NEXT:    vpshufb %xmm13, %xmm3, %xmm2
-; AVX512F-FAST-NEXT:    vporq %xmm0, %xmm2, %xmm28
+; AVX512F-FAST-NEXT:    vporq %xmm1, %xmm2, %xmm28
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm1
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm25, %xmm0
-; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm8
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm18
+; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm26, %xmm0
+; AVX512F-FAST-NEXT:    vpshufb %xmm0, %xmm1, %xmm11
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm1, %xmm17
-; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm11
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm26, %xmm1
-; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm11, %xmm8
-; AVX512F-FAST-NEXT:    vporq %xmm0, %xmm8, %xmm29
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [1,1,2,2,2,2,2,2]
+; AVX512F-FAST-NEXT:    vporq %xmm8, %xmm11, %xmm29
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm11 = [1,1,2,2,2,2,2,2]
 ; AVX512F-FAST-NEXT:    vmovdqa 32(%r8), %ymm8
-; AVX512F-FAST-NEXT:    vpermd %ymm8, %ymm0, %ymm0
+; AVX512F-FAST-NEXT:    vpermd %ymm8, %ymm11, %ymm11
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255]
-; AVX512F-FAST-NEXT:    vpandnq %ymm0, %ymm25, %ymm0
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm8, %ymm13
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm26
-; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm2
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = <4,u,5,5,5,5,u,6,30,30,30,u,31,31,31,31>
-; AVX512F-FAST-NEXT:    vpermd %ymm2, %ymm30, %ymm27
-; AVX512F-FAST-NEXT:    vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm27, %ymm27
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm27, %zmm1, %zmm27
-; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm7, %ymm7
-; AVX512F-FAST-NEXT:    vmovdqa64 %ymm31, %ymm0
-; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm5, %ymm13
-; AVX512F-FAST-NEXT:    vpshufb %ymm1, %ymm5, %ymm6
-; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm5 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
-; AVX512F-FAST-NEXT:    # ymm5 = mem[0,1,0,1]
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm9, %ymm9
-; AVX512F-FAST-NEXT:    vpshufb %ymm5, %ymm14, %ymm5
+; AVX512F-FAST-NEXT:    vpandnq %ymm11, %ymm25, %ymm11
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128]
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm8, %ymm13
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm13, %zmm11, %zmm26
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm31 = <4,u,5,5,5,5,u,6,30,30,30,u,31,31,31,31>
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm30 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0]
+; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm11
+; AVX512F-FAST-NEXT:    vpermd %ymm11, %ymm31, %ymm27
+; AVX512F-FAST-NEXT:    vpandnq %ymm27, %ymm30, %ymm27
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm11, %ymm0
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm27, %zmm0, %zmm27
+; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm0 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12]
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm7, %ymm7
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm13 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm5
+; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25]
+; AVX512F-FAST-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm9, %ymm9
+; AVX512F-FAST-NEXT:    vpshufb %ymm0, %ymm14, %ymm4
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,ymm14[26],zero,ymm14[28],zero,ymm14[30],zero,zero,ymm14[29],zero,ymm14[31],zero,zero
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3],xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm12 = ymm15[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u]
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm15 = ymm15[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm31 = ymm7[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,ymm4[27],zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30],zero
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm19, %xmm0
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm18, %xmm1
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[21],zero,zero,ymm6[20],zero,ymm6[22],zero,ymm6[24],zero,zero,ymm6[23],zero,ymm6[25],zero,zero
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[26],zero,ymm6[28],zero,zero,ymm6[27],zero,ymm6[29],zero,ymm6[31],zero,zero,ymm6[30],zero
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm19, %xmm1
+; AVX512F-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX512F-FAST-NEXT:    vmovdqa64 %xmm16, %xmm1
 ; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
 ; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
 ; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
 ; AVX512F-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm1
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm9[2,2,3,3]
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm28, %zmm1, %zmm28
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm7[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm9[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm13[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm14[2,2,3,3]
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm28, %zmm1, %zmm18
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm10, %xmm10
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm13 = ymm5[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3]
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm18, %xmm3
+; AVX512F-FAST-NEXT:    vmovdqa64 %xmm17, %xmm5
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
+; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} xmm3 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm10, %xmm10
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1]
-; AVX512F-FAST-NEXT:    vmovdqa64 %xmm17, %xmm1
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3],xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
-; AVX512F-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm11
-; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm29, %zmm11, %zmm11
-; AVX512F-FAST-NEXT:    vmovdqa64 (%r8), %zmm14
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm14, %zmm2
+; AVX512F-FAST-NEXT:    vpshufb %xmm3, %xmm5, %xmm3
+; AVX512F-FAST-NEXT:    vinserti32x4 $2, %xmm29, %zmm3, %zmm3
+; AVX512F-FAST-NEXT:    vmovdqa64 (%r8), %zmm5
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm5, %zmm11
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm21 = zmm21[2,2,3,3,6,6,7,7]
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm22 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm28 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400,72056498804555775,72056498804555775,18374967950370078975,18374967950370078975]
-; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm28, %zmm21, %zmm22
-; AVX512F-FAST-NEXT:    vpandq %ymm28, %ymm31, %ymm21
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm3, %zmm3
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm21 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512F-FAST-NEXT:    vporq %zmm21, %zmm3, %zmm3
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm22, %zmm21, %zmm3
-; AVX512F-FAST-NEXT:    vpermt2d %zmm14, %zmm30, %zmm8
-; AVX512F-FAST-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm8
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm29 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400,72056498804555775,72056498804555775,18374967950370078975,18374967950370078975]
+; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm29, %zmm21, %zmm22
+; AVX512F-FAST-NEXT:    vpandq %ymm29, %ymm1, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm7, %zmm1
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm7 = zmm23[2,2,3,3,6,6,7,7]
+; AVX512F-FAST-NEXT:    vporq %zmm7, %zmm1, %zmm1
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255]
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm22, %zmm7, %zmm1
+; AVX512F-FAST-NEXT:    vpermt2d %zmm5, %zmm31, %zmm8
+; AVX512F-FAST-NEXT:    vpternlogd $184, %zmm1, %zmm30, %zmm8
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm8, 256(%r9)
-; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512F-FAST-NEXT:    # ymm3 = mem[0,0,1,1]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm3 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload
+; AVX512F-FAST-NEXT:    # ymm1 = mem[0,0,1,1]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    # ymm8 = mem[0,0,1,1]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 32-byte Folded Reload
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0]
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm16, %zmm8
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm16, %zmm8
 ; AVX512F-FAST-NEXT:    vpternlogq $248, %zmm25, %zmm8, %zmm26
-; AVX512F-FAST-NEXT:    vpor %ymm5, %ymm9, %ymm3
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm3, %zmm24, %zmm3
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm28, %ymm12, %ymm7
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm20, %zmm5
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm16, %zmm5
-; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm28, %ymm6, %ymm13
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm13, %zmm3
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm4
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm0
-; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm3, %zmm21, %zmm0
-; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm27
-; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = <6,6,6,u,7,7,7,7,u,8,8,8,8,u,9,9>
-; AVX512F-FAST-NEXT:    vpermd %zmm14, %zmm3, %zmm3
-; AVX512F-FAST-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm3
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm18[0,0,1,1,4,4,5,5]
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm11[0,0,1,1,4,4,5,5]
-; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; AVX512F-FAST-NEXT:    vpor %ymm4, %ymm9, %ymm1
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm24, %zmm1
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm29, %ymm12, %ymm2
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm20, %zmm2
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm16, %zmm2
+; AVX512F-FAST-NEXT:    vpternlogq $248, %ymm29, %ymm13, %ymm14
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm14, %zmm1
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm6
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm6, %zmm0
+; AVX512F-FAST-NEXT:    vpternlogq $226, %zmm1, %zmm7, %zmm0
+; AVX512F-FAST-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm27
+; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <6,6,6,u,7,7,7,7,u,8,8,8,8,u,9,9>
+; AVX512F-FAST-NEXT:    vpermd %zmm5, %zmm1, %zmm1
+; AVX512F-FAST-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm28[0,0,1,1,4,4,5,5]
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm2 = zmm3[0,0,1,1,4,4,5,5]
+; AVX512F-FAST-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
 ; AVX512F-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = <u,0,0,0,0,u,1,1,9,9,u,10,10,10,10,u>
-; AVX512F-FAST-NEXT:    vpermd %zmm2, %zmm0, %zmm0
-; AVX512F-FAST-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
+; AVX512F-FAST-NEXT:    vpermd %zmm11, %zmm0, %zmm0
+; AVX512F-FAST-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm27, 64(%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm0, (%r9)
-; AVX512F-FAST-NEXT:    vmovdqa64 %zmm3, 128(%r9)
+; AVX512F-FAST-NEXT:    vmovdqa64 %zmm1, 128(%r9)
 ; AVX512F-FAST-NEXT:    vmovdqa64 %zmm26, 192(%r9)
-; AVX512F-FAST-NEXT:    popq %rax
+; AVX512F-FAST-NEXT:    addq $24, %rsp
 ; AVX512F-FAST-NEXT:    vzeroupper
 ; AVX512F-FAST-NEXT:    retq
 ;
@@ -4424,37 +4425,37 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-LABEL: store_i8_stride5_vf64:
 ; AVX512BW-FAST:       # %bb.0:
 ; AVX512BW-FAST-NEXT:    vmovdqa64 (%r8), %zmm5
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdx), %ymm21
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdx), %ymm20
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm1, %ymm21, %ymm0
+; AVX512BW-FAST-NEXT:    vpshufb %ymm1, %ymm20, %ymm0
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%rcx), %ymm13
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128]
 ; AVX512BW-FAST-NEXT:    vpshufb %ymm2, %ymm13, %ymm3
 ; AVX512BW-FAST-NEXT:    vpor %ymm0, %ymm3, %ymm0
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %xmm6
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rcx), %xmm18
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rcx), %xmm17
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm8, %xmm18, %xmm3
+; AVX512BW-FAST-NEXT:    vpshufb %xmm8, %xmm17, %xmm3
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm7
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdx), %xmm20
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdx), %xmm19
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm10 = <6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm10, %xmm20, %xmm4
+; AVX512BW-FAST-NEXT:    vpshufb %xmm10, %xmm19, %xmm4
 ; AVX512BW-FAST-NEXT:    vpor %xmm3, %xmm4, %xmm3
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1]
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %xmm9
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %xmm17
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %xmm16
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm17, %xmm3
+; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm16, %xmm3
 ; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %xmm11
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rsi), %xmm19
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rsi), %xmm18
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm14 = <128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm14, %xmm19, %xmm4
+; AVX512BW-FAST-NEXT:    vpshufb %xmm14, %xmm18, %xmm4
 ; AVX512BW-FAST-NEXT:    vpor %xmm3, %xmm4, %xmm3
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm15 = ymm3[0,0,1,1]
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %ymm16
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %ymm21
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm3 = <3,3,3,u,4,4,4,4>
-; AVX512BW-FAST-NEXT:    vpermd %ymm16, %ymm3, %ymm22
+; AVX512BW-FAST-NEXT:    vpermd %ymm21, %ymm3, %ymm22
 ; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rsi), %ymm23
 ; AVX512BW-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm4 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14]
 ; AVX512BW-FAST-NEXT:    movl $138547332, %eax # imm = 0x8421084
@@ -4464,80 +4465,77 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k2
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm15, %zmm0 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%r8), %ymm24
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%r8), %ymm22
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = <1,1,2,2,2,2,2,2,27,27,27,27,u,28,28,28>
-; AVX512BW-FAST-NEXT:    vpermi2d %zmm5, %zmm24, %zmm15
+; AVX512BW-FAST-NEXT:    vpermi2d %zmm5, %zmm22, %zmm15
 ; AVX512BW-FAST-NEXT:    movabsq $4760450083537948804, %rax # imm = 0x4210842108421084
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm15, %zmm0 {%k3}
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm23[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm23[0,1,2,3],mem[4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm15 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30>
-; AVX512BW-FAST-NEXT:    vpshufb %zmm15, %zmm22, %zmm22
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm22 = zmm22[2,2,3,3,6,6,7,7]
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm23 = zmm16[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm16 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128>
-; AVX512BW-FAST-NEXT:    vpshufb %zmm16, %zmm23, %zmm23
+; AVX512BW-FAST-NEXT:    vpshufb %zmm15, %zmm23, %zmm23
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm23 = zmm23[2,2,3,3,6,6,7,7]
-; AVX512BW-FAST-NEXT:    vporq %zmm22, %zmm23, %zmm23
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm21[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128>
-; AVX512BW-FAST-NEXT:    vpshufb %zmm21, %zmm22, %zmm22
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm25 = zmm22[2,2,3,3,6,6,7,7]
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm21 = zmm21[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm21 = zmm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm21[21],zero,zero,zmm21[20],zero,zmm21[22],zero,zmm21[24],zero,zero,zmm21[23],zero,zmm21[25],zero,zero,zmm21[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm21[58],zero,zmm21[60],zero,zero,zmm21[59],zero,zmm21[61],zero,zmm21[63],zero,zero,zmm21[62],zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm21 = zmm21[2,2,3,3,6,6,7,7]
+; AVX512BW-FAST-NEXT:    vporq %zmm23, %zmm21, %zmm21
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm20[0,1,2,3],mem[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm20 = zmm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,zmm20[21],zero,zmm20[21,20],zero,zmm20[22],zero,zmm20[24],zero,zmm20[22,23],zero,zmm20[25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm20[59],zero,zero,zmm20[58],zero,zmm20[60],zero,zmm20[62],zero,zero,zmm20[61],zero,zmm20[63],zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm20 = zmm20[2,2,3,3,6,6,7,7]
 ; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],mem[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm22 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128>
-; AVX512BW-FAST-NEXT:    vpshufb %zmm22, %zmm13, %zmm13
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm13 = zmm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm13[19],zero,zmm13[21],zero,zero,zmm13[20],zero,zmm13[22],zero,zmm13[24],zero,zero,zmm13[23],zero,zmm13[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm13[59],zero,zero,zmm13[58],zero,zmm13[60],zero,zmm13[62],zero,zero,zmm13[61],zero,zmm13[63],zero,zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm13 = zmm13[2,2,3,3,6,6,7,7]
-; AVX512BW-FAST-NEXT:    vporq %zmm25, %zmm13, %zmm13
+; AVX512BW-FAST-NEXT:    vporq %zmm20, %zmm13, %zmm13
 ; AVX512BW-FAST-NEXT:    movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm23, %zmm13 {%k3}
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
-; AVX512BW-FAST-NEXT:    vpermi2d %zmm5, %zmm24, %zmm23
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm21, %zmm13 {%k3}
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm20 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31]
+; AVX512BW-FAST-NEXT:    vpermi2d %zmm5, %zmm22, %zmm20
 ; AVX512BW-FAST-NEXT:    movabsq $-8925843906633654008, %rax # imm = 0x8421084210842108
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k4
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm23, %zmm13 {%k4}
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %ymm23
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm24 = ymm23[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm23[27],zero,zero,ymm23[26],zero,ymm23[28],zero,ymm23[30],zero,zero,ymm23[29],zero,ymm23[31],zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm24[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %ymm24
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm26 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm24[27],zero,zero,ymm24[26],zero,ymm24[28],zero,ymm24[30],zero,zero,ymm24[29],zero,ymm24[31],zero,zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm26[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vporq %ymm25, %ymm26, %ymm25
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm18 = xmm18[0],xmm20[0],xmm18[1],xmm20[1],xmm18[2],xmm20[2],xmm18[3],xmm20[3],xmm18[4],xmm20[4],xmm18[5],xmm20[5],xmm18[6],xmm20[6],xmm18[7],xmm20[7]
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm26 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm26, %xmm18, %xmm18
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm18 = ymm18[0,0,1,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm18, %zmm25, %zmm25
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rsi), %ymm18
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm20 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm18[26],zero,ymm18[28],zero,zero,zero,zero,ymm18[29],zero,ymm18[31],zero,zero,ymm18[30]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm27 = ymm20[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %ymm20
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm28 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm20[26],zero,ymm20[28],zero,zero,ymm20[27],zero,ymm20[29],zero,ymm20[31],zero,zero,ymm20[30],zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vporq %ymm27, %ymm28, %ymm27
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm20, %zmm13 {%k4}
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %ymm20
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm21 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm20[27],zero,zero,ymm20[26],zero,ymm20[28],zero,ymm20[30],zero,zero,ymm20[29],zero,ymm20[31],zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm21 = ymm21[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %ymm22
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm23 = ymm22[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm22[27],zero,zero,ymm22[26],zero,ymm22[28],zero,ymm22[30],zero,zero,ymm22[29],zero,ymm22[31],zero,zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vporq %ymm21, %ymm23, %ymm21
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm17 = xmm17[0],xmm19[0],xmm17[1],xmm19[1],xmm17[2],xmm19[2],xmm17[3],xmm19[3],xmm17[4],xmm19[4],xmm17[5],xmm19[5],xmm17[6],xmm19[6],xmm17[7],xmm19[7]
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8>
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm19, %xmm17, %xmm17
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm17 = ymm17[0,0,1,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm27, %zmm17
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm25, %zmm17 {%k3}
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9]
-; AVX512BW-FAST-NEXT:    vpermd %zmm5, %zmm25, %zmm5
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm17, %zmm21, %zmm21
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rsi), %ymm17
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm23 = ymm17[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm17[26],zero,ymm17[28],zero,zero,zero,zero,ymm17[29],zero,ymm17[31],zero,zero,ymm17[30]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %ymm24
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm25 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm24[26],zero,ymm24[28],zero,zero,ymm24[27],zero,ymm24[29],zero,ymm24[31],zero,zero,ymm24[30],zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm25 = ymm25[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vporq %ymm23, %ymm25, %ymm23
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm16 = xmm16[0],xmm18[0],xmm16[1],xmm18[1],xmm16[2],xmm18[2],xmm16[3],xmm18[3],xmm16[4],xmm18[4],xmm16[5],xmm18[5],xmm16[6],xmm18[6],xmm16[7],xmm18[7]
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm18 = <0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm18, %xmm16, %xmm16
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[0,0,1,1]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm23, %zmm16
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm21, %zmm16 {%k3}
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9]
+; AVX512BW-FAST-NEXT:    vpermd %zmm5, %zmm21, %zmm5
 ; AVX512BW-FAST-NEXT:    movabsq $2380225041768974402, %rax # imm = 0x2108421084210842
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm5, %zmm17 {%k3}
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm5, %zmm16 {%k3}
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm8, %xmm6, %xmm5
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm10, %xmm7, %xmm8
 ; AVX512BW-FAST-NEXT:    vpor %xmm5, %xmm8, %xmm5
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
-; AVX512BW-FAST-NEXT:    vpshufb %xmm26, %xmm6, %xmm6
+; AVX512BW-FAST-NEXT:    vpshufb %xmm19, %xmm6, %xmm6
 ; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm5, %zmm6, %zmm5
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm5 = zmm5[0,0,1,1,4,4,5,5]
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm9, %xmm6
 ; AVX512BW-FAST-NEXT:    vpshufb %xmm14, %xmm11, %xmm7
 ; AVX512BW-FAST-NEXT:    vpor %xmm6, %xmm7, %xmm6
 ; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3],xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7]
-; AVX512BW-FAST-NEXT:    vpshufb %xmm19, %xmm7, %xmm7
+; AVX512BW-FAST-NEXT:    vpshufb %xmm18, %xmm7, %xmm7
 ; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm6, %zmm7, %zmm6
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[0,0,1,1,4,4,5,5]
 ; AVX512BW-FAST-NEXT:    movabsq $-4165393823095705204, %rax # imm = 0xC6318C6318C6318C
@@ -4549,22 +4547,22 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    movabsq $595056260442243600, %rax # imm = 0x842108421084210
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm7, %zmm6 {%k3}
-; AVX512BW-FAST-NEXT:    vpshufb %ymm21, %ymm24, %ymm7
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm22, %ymm23, %ymm8
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vpor %ymm7, %ymm8, %ymm7
-; AVX512BW-FAST-NEXT:    vpshufb %ymm1, %ymm24, %ymm1
-; AVX512BW-FAST-NEXT:    vpshufb %ymm2, %ymm23, %ymm2
+; AVX512BW-FAST-NEXT:    vpshufb %ymm1, %ymm22, %ymm1
+; AVX512BW-FAST-NEXT:    vpshufb %ymm2, %ymm20, %ymm2
 ; AVX512BW-FAST-NEXT:    vpor %ymm1, %ymm2, %ymm1
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm1
-; AVX512BW-FAST-NEXT:    vpshufb %ymm15, %ymm18, %ymm2
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm22[18,19],zero,ymm22[21],zero,ymm22[21,20],zero,ymm22[22],zero,ymm22[24],zero,ymm22[22,23],zero,ymm22[25]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm20[19],zero,ymm20[21],zero,zero,ymm20[20],zero,ymm20[22],zero,ymm20[24],zero,zero,ymm20[23],zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
+; AVX512BW-FAST-NEXT:    vpor %ymm2, %ymm7, %ymm2
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512BW-FAST-NEXT:    vpshufb %ymm15, %ymm17, %ymm2
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm16, %ymm20, %ymm7
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm24[21],zero,zero,ymm24[20],zero,ymm24[22],zero,ymm24[24],zero,zero,ymm24[23],zero,ymm24[25],zero,zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3]
 ; AVX512BW-FAST-NEXT:    vpor %ymm2, %ymm7, %ymm2
-; AVX512BW-FAST-NEXT:    vpermd %ymm20, %ymm3, %ymm3
-; AVX512BW-FAST-NEXT:    vpshufb %ymm4, %ymm18, %ymm3 {%k1}
+; AVX512BW-FAST-NEXT:    vpermd %ymm24, %ymm3, %ymm3
+; AVX512BW-FAST-NEXT:    vpshufb %ymm4, %ymm17, %ymm3 {%k1}
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm2 {%k2}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm1 = <3,3,3,3,u,4,4,4,12,14,13,13,13,13,12,14>
@@ -4574,7 +4572,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm2 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm2, 64(%r9)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm6, (%r9)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm17, 128(%r9)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm16, 128(%r9)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm13, 256(%r9)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, 192(%r9)
 ; AVX512BW-FAST-NEXT:    vzeroupper

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
index d16daa04a48221..b1493847cdb852 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-6.ll
@@ -1724,10 +1724,10 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-LABEL: store_i8_stride6_vf32:
 ; AVX512F-SLOW:       # %bb.0:
 ; AVX512F-SLOW-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm2
-; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm3
-; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm4
-; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm5
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdi), %ymm3
+; AVX512F-SLOW-NEXT:    vmovdqa (%rsi), %ymm5
+; AVX512F-SLOW-NEXT:    vmovdqa (%rdx), %ymm2
+; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %ymm4
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r8), %ymm0
 ; AVX512F-SLOW-NEXT:    vmovdqa (%r9), %ymm1
 ; AVX512F-SLOW-NEXT:    vmovdqa (%rcx), %xmm7
@@ -1735,7 +1735,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[10,11,8,9,6,7,12,13,14,15,14,15,14,15,14,15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,0,0,1]
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm9 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[16],ymm5[16],ymm4[17],ymm5[17],ymm4[18],ymm5[18],ymm4[19],ymm5[19],ymm4[20],ymm5[20],ymm4[21],ymm5[21],ymm4[22],ymm5[22],ymm4[23],ymm5[23]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm9 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
 ; AVX512F-SLOW-NEXT:    vprold $16, %ymm9, %ymm9
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,2,2,3]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm6, %zmm6
@@ -1744,7 +1744,7 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm10[8],xmm9[8],xmm10[9],xmm9[9],xmm10[10],xmm9[10],xmm10[11],xmm9[11],xmm10[12],xmm9[12],xmm10[13],xmm9[13],xmm10[14],xmm9[14],xmm10[15],xmm9[15]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[8,9,6,7,12,13,10,11,14,15,14,15,14,15,14,15]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,0,0,1]
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm12 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm12 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} ymm12 = ymm12[0,3,2,1,4,5,6,7,8,11,10,9,12,13,14,15]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5,6,5,8,9,10,11,12,13,14,13]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,2,2,3]
@@ -1769,17 +1769,17 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm15 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
 ; AVX512F-SLOW-NEXT:    vprold $16, %xmm15, %xmm15
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm13, %zmm15, %zmm13
-; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm15 = zmm13[0,0,0,1,4,4,4,5]
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm13 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm7, %xmm7
-; AVX512F-SLOW-NEXT:    vpshufb %xmm13, %xmm8, %xmm8
+; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm13 = zmm13[0,0,0,1,4,4,4,5]
+; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm15 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
+; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm7, %xmm7
+; AVX512F-SLOW-NEXT:    vpshufb %xmm15, %xmm8, %xmm8
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7]
 ; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7]
 ; AVX512F-SLOW-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm8[0,3,2,1,4,5,6,7]
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm8[0,1,2,3,4,5,6,5]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm7
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm8 = zmm7[0,0,0,1,4,4,4,5]
-; AVX512F-SLOW-NEXT:    vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm15, %zmm8
+; AVX512F-SLOW-NEXT:    vpternlogd $228, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm13, %zmm8
 ; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm7 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm11, %xmm9
 ; AVX512F-SLOW-NEXT:    vpshufb %xmm7, %xmm12, %xmm10
@@ -1790,32 +1790,30 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm10, %zmm9
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm9 = zmm9[0,0,0,1,4,4,4,5]
 ; AVX512F-SLOW-NEXT:    vpternlogd $184, %zmm8, %zmm14, %zmm9
-; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm8 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm3, %ymm10
-; AVX512F-SLOW-NEXT:    vpshufb %ymm8, %ymm2, %ymm8
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm8 = ymm8[0],ymm10[0],ymm8[1],ymm10[1],ymm8[2],ymm10[2],ymm8[3],ymm10[3],ymm8[4],ymm10[4],ymm8[5],ymm10[5],ymm8[6],ymm10[6],ymm8[7],ymm10[7],ymm8[16],ymm10[16],ymm8[17],ymm10[17],ymm8[18],ymm10[18],ymm8[19],ymm10[19],ymm8[20],ymm10[20],ymm8[21],ymm10[21],ymm8[22],ymm10[22],ymm8[23],ymm10[23]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm10 = ymm4[8],ymm5[8],ymm4[9],ymm5[9],ymm4[10],ymm5[10],ymm4[11],ymm5[11],ymm4[12],ymm5[12],ymm4[13],ymm5[13],ymm4[14],ymm5[14],ymm4[15],ymm5[15],ymm4[24],ymm5[24],ymm4[25],ymm5[25],ymm4[26],ymm5[26],ymm4[27],ymm5[27],ymm4[28],ymm5[28],ymm4[29],ymm5[29],ymm4[30],ymm5[30],ymm4[31],ymm5[31]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm15, %ymm4, %ymm8
+; AVX512F-SLOW-NEXT:    vpshufb %ymm15, %ymm2, %ymm10
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm8 = ymm10[0],ymm8[0],ymm10[1],ymm8[1],ymm10[2],ymm8[2],ymm10[3],ymm8[3],ymm10[4],ymm8[4],ymm10[5],ymm8[5],ymm10[6],ymm8[6],ymm10[7],ymm8[7],ymm10[16],ymm8[16],ymm10[17],ymm8[17],ymm10[18],ymm8[18],ymm10[19],ymm8[19],ymm10[20],ymm8[20],ymm10[21],ymm8[21],ymm10[22],ymm8[22],ymm10[23],ymm8[23]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm10 = ymm3[8],ymm5[8],ymm3[9],ymm5[9],ymm3[10],ymm5[10],ymm3[11],ymm5[11],ymm3[12],ymm5[12],ymm3[13],ymm5[13],ymm3[14],ymm5[14],ymm3[15],ymm5[15],ymm3[24],ymm5[24],ymm3[25],ymm5[25],ymm3[26],ymm5[26],ymm3[27],ymm5[27],ymm3[28],ymm5[28],ymm3[29],ymm5[29],ymm3[30],ymm5[30],ymm3[31],ymm5[31]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm10 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm8 = zmm8[2,2,2,3,6,6,6,7]
-; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm5, %ymm5
-; AVX512F-SLOW-NEXT:    vpshufb %ymm13, %ymm4, %ymm4
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[16],ymm5[16],ymm4[17],ymm5[17],ymm4[18],ymm5[18],ymm4[19],ymm5[19],ymm4[20],ymm5[20],ymm4[21],ymm5[21],ymm4[22],ymm5[22],ymm4[23],ymm5[23]
-; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
-; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm2
+; AVX512F-SLOW-NEXT:    vpbroadcastq {{.*#+}} ymm10 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512F-SLOW-NEXT:    vpshufb %ymm10, %ymm5, %ymm5
+; AVX512F-SLOW-NEXT:    vpshufb %ymm10, %ymm3, %ymm3
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm5[0],ymm3[1],ymm5[1],ymm3[2],ymm5[2],ymm3[3],ymm5[3],ymm3[4],ymm5[4],ymm3[5],ymm5[5],ymm3[6],ymm5[6],ymm3[7],ymm5[7],ymm3[16],ymm5[16],ymm3[17],ymm5[17],ymm3[18],ymm5[18],ymm3[19],ymm5[19],ymm3[20],ymm5[20],ymm3[21],ymm5[21],ymm3[22],ymm5[22],ymm3[23],ymm5[23]
+; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
+; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm2, %zmm3, %zmm2
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm2 = zmm2[2,2,2,3,6,6,6,7]
-; AVX512F-SLOW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-SLOW-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-SLOW-NEXT:    vpternlogq $202, %zmm8, %zmm2, %zmm3
-; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm1, %ymm2
+; AVX512F-SLOW-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm2
+; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm1, %ymm3
 ; AVX512F-SLOW-NEXT:    vpshufb %ymm7, %ymm0, %ymm4
-; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm4[0],ymm2[0],ymm4[1],ymm2[1],ymm4[2],ymm2[2],ymm4[3],ymm2[3],ymm4[4],ymm2[4],ymm4[5],ymm2[5],ymm4[6],ymm2[6],ymm4[7],ymm2[7],ymm4[16],ymm2[16],ymm4[17],ymm2[17],ymm4[18],ymm2[18],ymm4[19],ymm2[19],ymm4[20],ymm2[20],ymm4[21],ymm2[21],ymm4[22],ymm2[22],ymm4[23],ymm2[23]
+; AVX512F-SLOW-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
 ; AVX512F-SLOW-NEXT:    vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27,24,25,22,23,28,29,26,27,28,29,30,31]
-; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,2,2,3,6,6,6,7]
-; AVX512F-SLOW-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm0
+; AVX512F-SLOW-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm0, 128(%rax)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm9, (%rax)
 ; AVX512F-SLOW-NEXT:    vmovdqa64 %zmm6, 64(%rax)
@@ -1831,34 +1829,32 @@ define void @store_i8_stride6_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %ymm5
 ; AVX512F-FAST-NEXT:    vmovdqa (%r8), %ymm0
 ; AVX512F-FAST-NEXT:    vmovdqa (%r9), %ymm1
-; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm6 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm3, %ymm7
-; AVX512F-FAST-NEXT:    vpshufb %ymm6, %ymm2, %ymm6
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm6[0],ymm7[0],ymm6[1],ymm7[1],ymm6[2],ymm7[2],ymm6[3],ymm7[3],ymm6[4],ymm7[4],ymm6[5],ymm7[5],ymm6[6],ymm7[6],ymm6[7],ymm7[7],ymm6[16],ymm7[16],ymm6[17],ymm7[17],ymm6[18],ymm7[18],ymm6[19],ymm7[19],ymm6[20],ymm7[20],ymm6[21],ymm7[21],ymm6[22],ymm7[22],ymm6[23],ymm7[23]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm4[8],ymm5[8],ymm4[9],ymm5[9],ymm4[10],ymm5[10],ymm4[11],ymm5[11],ymm4[12],ymm5[12],ymm4[13],ymm5[13],ymm4[14],ymm5[14],ymm4[15],ymm5[15],ymm4[24],ymm5[24],ymm4[25],ymm5[25],ymm4[26],ymm5[26],ymm4[27],ymm5[27],ymm4[28],ymm5[28],ymm4[29],ymm5[29],ymm4[30],ymm5[30],ymm4[31],ymm5[31]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
-; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[2,2,2,3,6,6,6,7]
 ; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm7 = [5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10,5,8,7,6,9,0,0,10]
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm5, %ymm8
-; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm4, %ymm9
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm8 = ymm9[0],ymm8[0],ymm9[1],ymm8[1],ymm9[2],ymm8[2],ymm9[3],ymm8[3],ymm9[4],ymm8[4],ymm9[5],ymm8[5],ymm9[6],ymm8[6],ymm9[7],ymm8[7],ymm9[16],ymm8[16],ymm9[17],ymm8[17],ymm9[18],ymm8[18],ymm9[19],ymm8[19],ymm9[20],ymm8[20],ymm9[21],ymm8[21],ymm9[22],ymm8[22],ymm9[23],ymm8[23]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm9 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm5, %ymm6
+; AVX512F-FAST-NEXT:    vpshufb %ymm7, %ymm4, %ymm8
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm8[0],ymm6[0],ymm8[1],ymm6[1],ymm8[2],ymm6[2],ymm8[3],ymm6[3],ymm8[4],ymm6[4],ymm8[5],ymm6[5],ymm8[6],ymm6[6],ymm8[7],ymm6[7],ymm8[16],ymm6[16],ymm8[17],ymm6[17],ymm8[18],ymm6[18],ymm8[19],ymm6[19],ymm8[20],ymm6[20],ymm8[21],ymm6[21],ymm8[22],ymm6[22],ymm8[23],ymm6[23]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm8 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,22,23,28,29,26,27,30,31,30,31,30,31,30,31]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm6
+; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[2,2,2,3,6,6,6,7]
+; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm8 = [8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0,8,7,6,9,0,0,10,0]
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm3, %ymm9
+; AVX512F-FAST-NEXT:    vpshufb %ymm8, %ymm2, %ymm8
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm8 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[4],ymm9[4],ymm8[5],ymm9[5],ymm8[6],ymm9[6],ymm8[7],ymm9[7],ymm8[16],ymm9[16],ymm8[17],ymm9[17],ymm8[18],ymm9[18],ymm8[19],ymm9[19],ymm8[20],ymm9[20],ymm8[21],ymm9[21],ymm8[22],ymm9[22],ymm8[23],ymm9[23]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm9 = ymm4[8],ymm5[8],ymm4[9],ymm5[9],ymm4[10],ymm5[10],ymm4[11],ymm5[11],ymm4[12],ymm5[12],ymm4[13],ymm5[13],ymm4[14],ymm5[14],ymm4[15],ymm5[15],ymm4[24],ymm5[24],ymm4[25],ymm5[25],ymm4[26],ymm5[26],ymm4[27],ymm5[27],ymm4[28],ymm5[28],ymm4[29],ymm5[29],ymm4[30],ymm5[30],ymm4[31],ymm5[31]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,24,25,22,23,28,29,30,31,30,31,30,31,30,31]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm8 = zmm8[2,2,2,3,6,6,6,7]
-; AVX512F-FAST-NEXT:    vbroadcasti64x4 {{.*#+}} zmm9 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512F-FAST-NEXT:    # zmm9 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-FAST-NEXT:    vpternlogq $202, %zmm6, %zmm8, %zmm9
+; AVX512F-FAST-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm6, %zmm8
 ; AVX512F-FAST-NEXT:    vpbroadcastq {{.*#+}} ymm10 = [6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0,6,5,8,7,0,9,0,0]
 ; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm1, %ymm6
-; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm8
-; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm8[0],ymm6[0],ymm8[1],ymm6[1],ymm8[2],ymm6[2],ymm8[3],ymm6[3],ymm8[4],ymm6[4],ymm8[5],ymm6[5],ymm8[6],ymm6[6],ymm8[7],ymm6[7],ymm8[16],ymm6[16],ymm8[17],ymm6[17],ymm8[18],ymm6[18],ymm8[19],ymm6[19],ymm8[20],ymm6[20],ymm8[21],ymm6[21],ymm8[22],ymm6[22],ymm8[23],ymm6[23]
-; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm8 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm8 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27,24,25,22,23,28,29,26,27,28,29,30,31]
-; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm6
+; AVX512F-FAST-NEXT:    vpshufb %ymm10, %ymm0, %ymm9
+; AVX512F-FAST-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm9[0],ymm6[0],ymm9[1],ymm6[1],ymm9[2],ymm6[2],ymm9[3],ymm6[3],ymm9[4],ymm6[4],ymm9[5],ymm6[5],ymm9[6],ymm6[6],ymm9[7],ymm6[7],ymm9[16],ymm6[16],ymm9[17],ymm6[17],ymm9[18],ymm6[18],ymm9[19],ymm6[19],ymm9[20],ymm6[20],ymm9[21],ymm6[21],ymm9[22],ymm6[22],ymm9[23],ymm6[23]
+; AVX512F-FAST-NEXT:    vpunpckhbw {{.*#+}} ymm9 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20,21,26,27,24,25,22,23,28,29,26,27,28,29,30,31]
+; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm6, %zmm6
 ; AVX512F-FAST-NEXT:    vpermq {{.*#+}} zmm6 = zmm6[2,2,2,3,6,6,6,7]
-; AVX512F-FAST-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm9, %zmm6
+; AVX512F-FAST-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm8, %zmm6
 ; AVX512F-FAST-NEXT:    vmovdqa (%rcx), %xmm9
 ; AVX512F-FAST-NEXT:    vpshufb %xmm7, %xmm9, %xmm8
 ; AVX512F-FAST-NEXT:    vmovdqa (%rdx), %xmm11

diff  --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
index a4f21ea2a9154d..04c1bd53225206 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
@@ -3437,8 +3437,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-SLOW-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm15[0,1,2,3,4,5,5,6]
 ; AVX512F-SLOW-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[2,2,3,3]
 ; AVX512F-SLOW-NEXT:    vpermq {{.*#+}} ymm7 = ymm7[0,1,0,1]
-; AVX512F-SLOW-NEXT:    vmovdqa {{.*#+}} ymm9 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; AVX512F-SLOW-NEXT:    vpandn %ymm7, %ymm9, %ymm7
+; AVX512F-SLOW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm7
 ; AVX512F-SLOW-NEXT:    vpshufb {{.*#+}} ymm9 = zero,ymm4[13,u,u,u,u],zero,zero,ymm4[14,u,u,u,u],zero,zero,ymm4[15,u,u,u,u],zero,zero,ymm4[16,u,u,u,u],zero,zero,ymm4[17,u,u]
 ; AVX512F-SLOW-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
 ; AVX512F-SLOW-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm7
@@ -3547,8 +3546,7 @@ define void @store_i8_stride7_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512F-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm10 = [2,2,3,3,2,2,3,3]
 ; AVX512F-FAST-NEXT:    # ymm10 = mem[0,1,0,1]
 ; AVX512F-FAST-NEXT:    vpermd %ymm8, %ymm10, %ymm8
-; AVX512F-FAST-NEXT:    vmovdqa {{.*#+}} ymm10 = [255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255]
-; AVX512F-FAST-NEXT:    vpandn %ymm8, %ymm10, %ymm8
+; AVX512F-FAST-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm8
 ; AVX512F-FAST-NEXT:    vmovdqa64 %ymm17, %ymm13
 ; AVX512F-FAST-NEXT:    vpshufb {{.*#+}} ymm10 = zero,ymm13[13,u,u,u,u],zero,zero,ymm13[14,u,u,u,u],zero,zero,ymm13[15,u,u,u,u],zero,zero,ymm13[16,u,u,u,u],zero,zero,ymm13[17,u,u]
 ; AVX512F-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
@@ -8415,250 +8413,253 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512BW-FAST-LABEL: store_i8_stride7_vf64:
 ; AVX512BW-FAST:       # %bb.0:
-; AVX512BW-FAST-NEXT:    subq $200, %rsp
+; AVX512BW-FAST-NEXT:    subq $72, %rsp
 ; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm9
-; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm9, (%rsp) # 64-byte Spill
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rsi), %zmm5
-; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %zmm2
-; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512BW-FAST-NEXT:    vmovdqa (%rax), %ymm4
-; AVX512BW-FAST-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rax), %ymm13
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %zmm7
+; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm7, (%rsp) # 64-byte Spill
+; AVX512BW-FAST-NEXT:    vmovdqa (%rax), %ymm3
+; AVX512BW-FAST-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-FAST-NEXT:    vmovdqa 32(%rax), %ymm11
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm0 = [12,13,2,3,12,13,0,1,14,15,2,3,0,1,14,15,28,29,18,19,28,29,16,17,30,31,18,19,16,17,30,31]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm0, %ymm4, %ymm1
-; AVX512BW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
-; AVX512BW-FAST-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512BW-FAST-NEXT:    vpermw %ymm4, %ymm3, %ymm3
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm6
-; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %ymm15
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm17, %ymm15, %ymm7
-; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %ymm1
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm20, %ymm1, %ymm8
-; AVX512BW-FAST-NEXT:    vmovdqa64 %ymm1, %ymm24
-; AVX512BW-FAST-NEXT:    vpor %ymm7, %ymm8, %ymm7
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%r9), %xmm25
-; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %xmm10
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm10[8],xmm25[8],xmm10[9],xmm25[9],xmm10[10],xmm25[10],xmm10[11],xmm25[11],xmm10[12],xmm25[12],xmm10[13],xmm25[13],xmm10[14],xmm25[14],xmm10[15],xmm25[15]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm8 = xmm8[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm22
+; AVX512BW-FAST-NEXT:    vpshufb %ymm0, %ymm3, %ymm1
+; AVX512BW-FAST-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [4,5,4,5,5,6,5,6,4,5,4,5,5,6,5,6]
+; AVX512BW-FAST-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vpermw %ymm3, %ymm2, %ymm2
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm4
+; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %ymm2
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = [13,128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm1, %ymm2, %ymm5
+; AVX512BW-FAST-NEXT:    vmovdqa %ymm2, %ymm3
+; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %ymm2
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [128,128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm16, %ymm2, %ymm6
+; AVX512BW-FAST-NEXT:    vmovdqa %ymm2, %ymm12
+; AVX512BW-FAST-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512BW-FAST-NEXT:    vmovdqa (%r9), %xmm2
+; AVX512BW-FAST-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-FAST-NEXT:    vmovdqa (%r8), %xmm6
+; AVX512BW-FAST-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm6 = xmm6[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm21
 ; AVX512BW-FAST-NEXT:    movabsq $2323999253380730912, %r10 # imm = 0x2040810204081020
 ; AVX512BW-FAST-NEXT:    kmovq %r10, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm6, %zmm22 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %ymm1
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm21, %ymm1, %ymm6
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm1, %ymm7
-; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %ymm1
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm23, %ymm1, %ymm11
-; AVX512BW-FAST-NEXT:    vmovdqa %ymm1, %ymm8
-; AVX512BW-FAST-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512BW-FAST-NEXT:    vpor %ymm6, %ymm11, %ymm6
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm14
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %xmm16
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm11 = xmm16[8],xmm14[8],xmm16[9],xmm14[9],xmm16[10],xmm14[10],xmm16[11],xmm14[11],xmm16[12],xmm14[12],xmm16[13],xmm14[13],xmm16[14],xmm14[14],xmm16[15],xmm14[15]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm11 = xmm11[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm11 = ymm11[0,1,0,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm11, %zmm26
-; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %ymm11
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm28 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm28, %ymm11, %ymm6
-; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %ymm12
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm29 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
-; AVX512BW-FAST-NEXT:    vpshufb %ymm29, %ymm12, %ymm18
-; AVX512BW-FAST-NEXT:    vporq %ymm6, %ymm18, %ymm6
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %xmm18
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rsi), %xmm19
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm27 = xmm19[8],xmm18[8],xmm19[9],xmm18[9],xmm19[10],xmm18[10],xmm19[11],xmm18[11],xmm19[12],xmm18[12],xmm19[13],xmm18[13],xmm19[14],xmm18[14],xmm19[15],xmm18[15]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm27 = xmm27[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[0,1,0,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm6, %zmm27, %zmm6
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm4, %zmm21 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %ymm2
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm19 = [0,1,0,1,14,128,14,15,0,1,14,15,128,13,14,15,16,17,16,128,30,31,30,31,16,17,128,31,28,29,30,31]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm19, %ymm2, %ymm4
+; AVX512BW-FAST-NEXT:    vmovdqa %ymm2, %ymm5
+; AVX512BW-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %ymm2
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm20 = [128,128,128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm20, %ymm2, %ymm9
+; AVX512BW-FAST-NEXT:    vmovdqa64 %ymm2, %ymm25
+; AVX512BW-FAST-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512BW-FAST-NEXT:    vpor %ymm4, %ymm9, %ymm4
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdx), %xmm13
+; AVX512BW-FAST-NEXT:    vmovdqa (%rcx), %xmm15
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm9 = xmm15[8],xmm13[8],xmm15[9],xmm13[9],xmm15[10],xmm13[10],xmm15[11],xmm13[11],xmm15[12],xmm13[12],xmm15[13],xmm13[13],xmm15[14],xmm13[14],xmm15[15],xmm13[15]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm9 = xmm9[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm9, %zmm22
+; AVX512BW-FAST-NEXT:    vmovdqa (%rdi), %ymm14
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [0,1,14,128,12,13,0,1,14,15,128,3,12,13,2,3,16,128,30,31,28,29,16,17,128,31,18,19,28,29,18,128]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm23, %ymm14, %ymm4
+; AVX512BW-FAST-NEXT:    vmovdqa (%rsi), %ymm10
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm24 = [128,128,128,14,128,128,128,128,128,128,15,128,128,128,128,128,128,16,128,128,128,128,128,128,17,128,128,128,128,128,128,18]
+; AVX512BW-FAST-NEXT:    vpshufb %ymm24, %ymm10, %ymm17
+; AVX512BW-FAST-NEXT:    vporq %ymm4, %ymm17, %ymm4
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdi), %xmm17
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rsi), %xmm18
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm26 = xmm18[8],xmm17[8],xmm18[9],xmm17[9],xmm18[10],xmm17[10],xmm18[11],xmm17[11],xmm18[12],xmm17[12],xmm18[13],xmm17[13],xmm18[14],xmm17[14],xmm18[15],xmm17[15]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm26 = xmm26[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm26[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm4, %zmm26, %zmm8
 ; AVX512BW-FAST-NEXT:    movabsq $435749860008887046, %r10 # imm = 0x60C183060C18306
 ; AVX512BW-FAST-NEXT:    kmovq %r10, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm26, %zmm6 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm22, %zmm8 {%k1}
 ; AVX512BW-FAST-NEXT:    movabsq $4066998693416279096, %r10 # imm = 0x3870E1C3870E1C38
 ; AVX512BW-FAST-NEXT:    kmovq %r10, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm22, %zmm6 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
-; AVX512BW-FAST-NEXT:    vpermw %ymm13, %ymm22, %ymm22
-; AVX512BW-FAST-NEXT:    vpshufb %ymm0, %ymm13, %ymm0
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm22, %zmm0, %zmm22
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%r9), %ymm27
-; AVX512BW-FAST-NEXT:    vpshufb %ymm17, %ymm27, %ymm17
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%r8), %ymm1
-; AVX512BW-FAST-NEXT:    vpshufb %ymm20, %ymm1, %ymm20
-; AVX512BW-FAST-NEXT:    vporq %ymm17, %ymm20, %ymm17
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm20 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm1[20],zero,ymm1[18],zero,ymm1[20,21,20,21],zero,ymm1[19],zero,ymm1[19,20,21,22],zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[2,3,2,3]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm26 = ymm27[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm27[20],zero,ymm27[18],zero,zero,zero,zero,ymm27[21],zero,ymm27[19],zero,zero,zero,zero,ymm27[22]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm26 = ymm26[2,3,2,3]
-; AVX512BW-FAST-NEXT:    vporq %ymm20, %ymm26, %ymm20
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm17, %zmm26
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm21, %zmm8 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10]
+; AVX512BW-FAST-NEXT:    vpermw %ymm11, %ymm21, %ymm21
+; AVX512BW-FAST-NEXT:    vpshufb %ymm0, %ymm11, %ymm0
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm0, %zmm21
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%r9), %ymm28
+; AVX512BW-FAST-NEXT:    vpshufb %ymm1, %ymm28, %ymm1
+; AVX512BW-FAST-NEXT:    vmovdqa 32(%r8), %ymm0
+; AVX512BW-FAST-NEXT:    vpshufb %ymm16, %ymm0, %ymm16
+; AVX512BW-FAST-NEXT:    vporq %ymm1, %ymm16, %ymm1
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,20,128,18,128,128,128,128,21,128,19,128,128,128,128,22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,128,128,25,128,23,128,128,128,128,26,128,24,128,128>
+; AVX512BW-FAST-NEXT:    vpshufb %ymm2, %ymm28, %ymm16
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm16 = ymm16[2,3,2,3]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm22 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[20],zero,ymm0[18],zero,ymm0[20,21,20,21],zero,ymm0[19],zero,ymm0[19,20,21,22],zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm22[2,3,2,3]
+; AVX512BW-FAST-NEXT:    vporq %ymm22, %ymm16, %ymm16
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm16, %zmm1, %zmm1
 ; AVX512BW-FAST-NEXT:    movabsq $145249953336295682, %r10 # imm = 0x204081020408102
 ; AVX512BW-FAST-NEXT:    kmovq %r10, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm22, %zmm26 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdx), %ymm22
-; AVX512BW-FAST-NEXT:    vpshufb %ymm21, %ymm22, %ymm17
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rcx), %ymm30
-; AVX512BW-FAST-NEXT:    vpshufb %ymm23, %ymm30, %ymm20
-; AVX512BW-FAST-NEXT:    vporq %ymm17, %ymm20, %ymm17
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm20 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm22[18],zero,ymm22[18,19,20,21],zero,ymm22[19],zero,ymm22[25,26,27,22],zero,ymm22[20],zero
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm21, %zmm1 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdx), %ymm26
+; AVX512BW-FAST-NEXT:    vpshufb %ymm19, %ymm26, %ymm16
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rcx), %ymm29
+; AVX512BW-FAST-NEXT:    vpshufb %ymm20, %ymm29, %ymm19
+; AVX512BW-FAST-NEXT:    vporq %ymm16, %ymm19, %ymm16
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = <u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,18,128,128,128,128,21,128,19,128,128,128,128,22,128,20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,25,128,23,128,128,128,128,26,128,24,128,128,128,128>
+; AVX512BW-FAST-NEXT:    vpshufb %ymm2, %ymm29, %ymm20
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[2,3,2,3]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm21 = ymm30[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm30[18],zero,zero,zero,zero,ymm30[21],zero,ymm30[19],zero,zero,zero,zero,ymm30[22],zero,ymm30[20]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm21 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm26[18],zero,ymm26[18,19,20,21],zero,ymm26[19],zero,ymm26[25,26,27,22],zero,ymm26[20],zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm21 = ymm21[2,3,2,3]
-; AVX512BW-FAST-NEXT:    vporq %ymm20, %ymm21, %ymm20
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm17, %zmm21
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %ymm31
-; AVX512BW-FAST-NEXT:    vpshufb %ymm28, %ymm31, %ymm17
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%rsi), %ymm0
-; AVX512BW-FAST-NEXT:    vpshufb %ymm29, %ymm0, %ymm20
-; AVX512BW-FAST-NEXT:    vporq %ymm17, %ymm20, %ymm17
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm20 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm31[18,19,20,21],zero,ymm31[19],zero,ymm31[21,20,21,22],zero,ymm31[20],zero,ymm31[22,23]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm20 = ymm20[2,3,2,3]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm23 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm0[21],zero,ymm0[19],zero,zero,zero,zero,ymm0[22],zero,ymm0[20],zero,zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm23 = ymm23[2,3,2,3]
-; AVX512BW-FAST-NEXT:    vporq %ymm20, %ymm23, %ymm20
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %zmm23
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm17, %zmm17
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%r8), %zmm3
-; AVX512BW-FAST-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512BW-FAST-NEXT:    vporq %ymm21, %ymm20, %ymm20
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm16, %zmm20
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %ymm30
+; AVX512BW-FAST-NEXT:    vpshufb %ymm23, %ymm30, %ymm16
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rsi), %ymm31
+; AVX512BW-FAST-NEXT:    vpshufb %ymm24, %ymm31, %ymm21
+; AVX512BW-FAST-NEXT:    vporq %ymm16, %ymm21, %ymm16
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm21 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm30[18,19,20,21],zero,ymm30[19],zero,ymm30[21,20,21,22],zero,ymm30[20],zero,ymm30[22,23]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm21 = ymm21[2,3,2,3]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm22 = ymm31[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm31[21],zero,ymm31[19],zero,zero,zero,zero,ymm31[22],zero,ymm31[20],zero,zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm22 = ymm22[2,3,2,3]
+; AVX512BW-FAST-NEXT:    vporq %ymm21, %ymm22, %ymm21
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rsi), %zmm24
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm16, %zmm16
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rdx), %zmm22
 ; AVX512BW-FAST-NEXT:    movabsq $3485998880071096368, %r10 # imm = 0x3060C183060C1830
 ; AVX512BW-FAST-NEXT:    kmovq %r10, %k2
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm21, %zmm17 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%r9), %zmm21
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm20, %zmm16 {%k2}
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rcx), %zmm23
 ; AVX512BW-FAST-NEXT:    movabsq $-4357498600088870461, %r10 # imm = 0xC3870E1C3870E1C3
 ; AVX512BW-FAST-NEXT:    kmovq %r10, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm26, %zmm17 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqa64 (%rax), %zmm26
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm22[0,1,2,3],zmm23[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm22 = zmm22[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,zmm22[23],zero,zmm22[21,22,23,26],zero,zmm22[24],zero,zmm22[28,29,26,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,59],zero,zero,zero,zero,zmm22[62],zero,zmm22[60],zero,zero,zero,zero,zmm22[63],zero,zmm22[61],zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm22 = zmm22[2,3,2,3,6,7,6,7]
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm28 = zmm30[0,1,2,3],zmm2[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm28 = zmm28[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm28[25],zero,zmm28[23],zero,zero,zero,zero,zmm28[26],zero,zmm28[24],zero,zero,zero,zero,zmm28[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm28[62],zero,zmm28[60],zero,zero,zero,zero,zmm28[63],zero,zmm28[61],zero,zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm28 = zmm28[2,3,2,3,6,7,6,7]
-; AVX512BW-FAST-NEXT:    vporq %zmm22, %zmm28, %zmm29
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %xmm28
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm9[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,zmm0[23],zero,zero,zero,zero,zmm0[26],zero,zmm0[24],zero,zero,zero,zero,zmm0[27],zero,zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,60,61,62],zero,zmm0[60],zero,zmm0[62,63,62,63],zero,zmm0[61],zero,zmm0[63,60,61]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm22 = zmm31[0,1,2,3],zmm5[4,5,6,7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm22 = zmm22[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm22[23],zero,zero,zero,zero,zmm22[26],zero,zmm22[24],zero,zero,zero,zero,zmm22[27],zero,zmm22[25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zmm22[62],zero,zmm22[60],zero,zero,zero,zero,zmm22[63],zero,zmm22[61],zero,zero,zero
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm22 = zmm22[2,3,2,3,6,7,6,7]
-; AVX512BW-FAST-NEXT:    vporq %zmm0, %zmm22, %zmm22
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rsi), %xmm30
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm16 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%r8), %zmm27
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm26[0,1,2,3],zmm23[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25],zero,zmm1[23],zero,zmm1[21,22,23,26],zero,zmm1[24],zero,zmm1[28,29,26,27,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero,zmm1[61],zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm29[0,1,2,3],zmm22[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm20 = zmm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm20[25],zero,zmm20[23],zero,zero,zero,zero,zmm20[26],zero,zmm20[24],zero,zero,zero,zero,zmm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm20[62],zero,zmm20[60],zero,zero,zero,zero,zmm20[63],zero,zmm20[61],zero,zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm20 = zmm20[2,3,2,3,6,7,6,7]
+; AVX512BW-FAST-NEXT:    vporq %zmm1, %zmm20, %zmm1
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%r9), %zmm26
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm20 = zmm31[0,1,2,3],zmm7[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm20 = zmm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,zmm20[23],zero,zero,zero,zero,zmm20[26],zero,zmm20[24],zero,zero,zero,zero,zmm20[27],zero,zmm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,60,61,62],zero,zmm20[60],zero,zmm20[62,63,62,63],zero,zmm20[61],zero,zmm20[63,60,61]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm20 = zmm20[2,3,2,3,6,7,6,7]
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm29 = zmm30[0,1,2,3],zmm24[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm29 = zmm29[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm29[23],zero,zero,zero,zero,zmm29[26],zero,zmm29[24],zero,zero,zero,zero,zmm29[27],zero,zmm29[25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zmm29[62],zero,zmm29[60],zero,zero,zero,zero,zmm29[63],zero,zmm29[61],zero,zero,zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm29 = zmm29[2,3,2,3,6,7,6,7]
+; AVX512BW-FAST-NEXT:    vporq %zmm20, %zmm29, %zmm20
+; AVX512BW-FAST-NEXT:    vmovdqa64 (%rax), %zmm29
 ; AVX512BW-FAST-NEXT:    movabsq $1742999440035548184, %rax # imm = 0x183060C183060C18
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm29, %zmm22 {%k1}
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm21[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm20 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdi), %xmm31
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm26[4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm0 = zmm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,24,25,24,25],zero,zmm0[23],zero,zmm0[23,24,25,26],zero,zmm0[24],zero,zmm0[30,31,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,61],zero,zmm0[59],zero,zero,zero,zero,zmm0[62],zero,zmm0[60],zero,zero,zero,zero,zmm0[63],zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7]
-; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm27[0,1,2,3],zmm3[4,5,6,7]
+; AVX512BW-FAST-NEXT:    vshufi64x2 {{.*#+}} zmm1 = zmm28[0,1,2,3],zmm27[4,5,6,7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm1[25],zero,zmm1[23],zero,zero,zero,zero,zmm1[26],zero,zmm1[24],zero,zero,zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm1[59],zero,zero,zero,zero,zmm1[62],zero,zmm1[60],zero,zero,zero,zero,zmm1[63],zero,zmm1[61]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
 ; AVX512BW-FAST-NEXT:    vporq %zmm0, %zmm1, %zmm0
 ; AVX512BW-FAST-NEXT:    movabsq $6971997760142192736, %rax # imm = 0x60C183060C183060
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k1
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm22 {%k1}
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm20 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm0 = [11,13,12,11,12,13,13,12,11,13,12,11,12,13,13,12,62,61,62,63,63,62,62,63,62,61,62,63,63,62,62,63]
-; AVX512BW-FAST-NEXT:    vpermi2w %zmm26, %zmm13, %zmm0
+; AVX512BW-FAST-NEXT:    vpermi2w %zmm29, %zmm11, %zmm0
 ; AVX512BW-FAST-NEXT:    movabsq $-9150747060186627967, %rax # imm = 0x8102040810204081
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k3
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm22 {%k3}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm11[28],zero,ymm11[30,31,30,31],zero,ymm11[29],zero,ymm11[31,28,29]
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm20 {%k3}
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,28,29,30],zero,ymm14[28],zero,ymm14[30,31,30,31],zero,ymm14[29],zero,ymm14[31,28,29]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm12[30],zero,ymm12[28],zero,zero,zero,zero,ymm12[31],zero,ymm12[29],zero,zero,zero
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm1 = ymm10[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,ymm10[30],zero,ymm10[28],zero,zero,zero,zero,ymm10[31],zero,ymm10[29],zero,zero,zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX512BW-FAST-NEXT:    vpor %ymm0, %ymm1, %ymm0
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm28[0],xmm30[0],xmm28[1],xmm30[1],xmm28[2],xmm30[2],xmm28[3],xmm30[3],xmm28[4],xmm30[4],xmm28[5],xmm30[5],xmm28[6],xmm30[6],xmm28[7],xmm30[7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm3
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm8[30],zero,ymm8[28],zero,zero,zero,zero,ymm8[31],zero,ymm8[29],zero
+; AVX512BW-FAST-NEXT:    vmovdqa 32(%rsi), %xmm7
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm31[0],xmm7[0],xmm31[1],xmm7[1],xmm31[2],xmm7[2],xmm31[3],xmm7[3],xmm31[4],xmm7[4],xmm31[5],xmm7[5],xmm31[6],xmm7[6],xmm31[7],xmm7[7]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,1]
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm6
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm25[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,zero,zero,ymm25[30],zero,ymm25[28],zero,zero,zero,zero,ymm25[31],zero,ymm25[29],zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm27 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm7[30],zero,ymm7[28],zero,zero,zero,zero,ymm7[31],zero,ymm7[29],zero,zero
-; AVX512BW-FAST-NEXT:    vmovdqa64 %ymm7, %ymm20
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm27 = ymm27[2,3,2,3]
-; AVX512BW-FAST-NEXT:    vporq %ymm0, %ymm27, %ymm27
-; AVX512BW-FAST-NEXT:    vmovdqa64 32(%rdx), %xmm31
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm28 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,ymm5[30],zero,ymm5[28],zero,zero,zero,zero,ymm5[31],zero,ymm5[29],zero,zero
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[2,3,2,3]
+; AVX512BW-FAST-NEXT:    vporq %ymm0, %ymm28, %ymm28
+; AVX512BW-FAST-NEXT:    vmovdqa 32(%rdx), %xmm2
 ; AVX512BW-FAST-NEXT:    vmovdqa 32(%rcx), %xmm1
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm31[0],xmm1[0],xmm31[1],xmm1[1],xmm31[2],xmm1[2],xmm31[3],xmm1[3],xmm31[4],xmm1[4],xmm31[5],xmm1[5],xmm31[6],xmm1[6],xmm31[7],xmm1[7]
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm7, %xmm0, %xmm0
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm9 = <4,5,0,1,u,u,u,6,7,2,3,u,u,u,8,9>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm9, %xmm0, %xmm0
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm27, %zmm27
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm3, %zmm27 {%k2}
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm15[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm15[27],zero,zero,zero,zero,ymm15[30],zero,ymm15[28],zero,zero,zero,zero,ymm15[31],zero
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm28, %zmm28
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm6, %zmm28 {%k2}
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm0 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,29],zero,ymm3[27],zero,zero,zero,zero,ymm3[30],zero,ymm3[28],zero,zero,zero,zero,ymm3[31],zero
+; AVX512BW-FAST-NEXT:    vmovdqa64 %ymm3, %ymm30
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm3 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm24[27],zero,zero,zero,zero,ymm24[30],zero,ymm24[28],zero,zero,zero,zero,ymm24[31],zero,ymm24[29]
-; AVX512BW-FAST-NEXT:    vmovdqa64 %ymm24, %ymm9
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
-; AVX512BW-FAST-NEXT:    vpor %ymm0, %ymm3, %ymm2
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%r9), %xmm4
-; AVX512BW-FAST-NEXT:    vmovdqa 32(%r8), %xmm3
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm8 = <u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm8, %xmm0, %xmm0
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} ymm6 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm12[27],zero,zero,zero,zero,ymm12[30],zero,ymm12[28],zero,zero,zero,zero,ymm12[31],zero,ymm12[29]
+; AVX512BW-FAST-NEXT:    vmovdqa64 %ymm12, %ymm21
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
+; AVX512BW-FAST-NEXT:    vpor %ymm0, %ymm6, %ymm3
+; AVX512BW-FAST-NEXT:    vmovdqa 32(%r9), %xmm5
+; AVX512BW-FAST-NEXT:    vmovdqa 32(%r8), %xmm4
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm12 = <u,u,4,5,0,1,u,u,u,6,7,2,3,u,u,u>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm0, %xmm0
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,1,0,1]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
-; AVX512BW-FAST-NEXT:    vpermw %zmm26, %zmm2, %zmm2
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [14,13,14,15,15,14,14,15,14,13,14,15,15,14,14,15,17,17,16,16,17,17,16,16,20,21,17,17,17,17,16,16]
+; AVX512BW-FAST-NEXT:    vpermw %zmm29, %zmm3, %zmm3
 ; AVX512BW-FAST-NEXT:    movabsq $580999813345182728, %rax # imm = 0x810204081020408
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k2
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm2, %zmm0 {%k2}
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k2}
 ; AVX512BW-FAST-NEXT:    movabsq $1016749673354069774, %rax # imm = 0xE1C3870E1C3870E
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k2
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm27 {%k2}
-; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm5, %xmm1, %xmm2
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm0, %zmm28 {%k2}
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,128,7,128,5,u,u,u,128,8,128,6,u,u>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm6, %xmm1, %xmm3
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm0 = <u,u,u,7,128,5,128,u,u,u,8,128,6,128,u,u>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm0, %xmm31, %xmm24
-; AVX512BW-FAST-NEXT:    vporq %xmm2, %xmm24, %xmm2
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm31[8],xmm1[9],xmm31[9],xmm1[10],xmm31[10],xmm1[11],xmm31[11],xmm1[12],xmm31[12],xmm1[13],xmm31[13],xmm1[14],xmm31[14],xmm1[15],xmm31[15]
+; AVX512BW-FAST-NEXT:    vpshufb %xmm0, %xmm2, %xmm19
+; AVX512BW-FAST-NEXT:    vporq %xmm3, %xmm19, %xmm3
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm1 = xmm1[6,3,2,u,u,u,9,8,5,4,u,u,u,11,10,7]
-; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm1
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm3, %zmm1
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm2 = <u,128,7,128,5,u,u,u,128,8,128,6,u,u,u,128>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm2, %xmm30, %xmm24
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm31 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm31, %xmm28, %xmm29
-; AVX512BW-FAST-NEXT:    vporq %xmm24, %xmm29, %xmm24
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm28 = xmm30[8],xmm28[8],xmm30[9],xmm28[9],xmm30[10],xmm28[10],xmm30[11],xmm28[11],xmm30[12],xmm28[12],xmm30[13],xmm28[13],xmm30[14],xmm28[14],xmm30[15],xmm28[15]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm28 = xmm28[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
-; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm28, %zmm24, %zmm24
+; AVX512BW-FAST-NEXT:    vpshufb %xmm2, %xmm7, %xmm3
+; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm19 = <u,7,128,5,128,u,u,u,8,128,6,128,u,u,u,9>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm19, %xmm31, %xmm25
+; AVX512BW-FAST-NEXT:    vporq %xmm3, %xmm25, %xmm3
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm7[8],xmm31[8],xmm7[9],xmm31[9],xmm7[10],xmm31[10],xmm7[11],xmm31[11],xmm7[12],xmm31[12],xmm7[13],xmm31[13],xmm7[14],xmm31[14],xmm7[15],xmm31[15]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm7 = xmm7[2,u,u,u,9,8,5,4,u,u,u,11,10,7,6,u]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm7, %zmm3, %zmm3
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
-; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm24 = zmm24[0,1,0,1,4,5,4,5]
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm24 {%k1}
+; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm25 = zmm3[0,1,0,1,4,5,4,5]
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm25 {%k1}
 ; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = <128,4,u,u,u,128,7,128,5,u,u,u,128,8,128,6>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm1, %xmm4, %xmm28
-; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} xmm29 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
-; AVX512BW-FAST-NEXT:    vpshufb %xmm29, %xmm3, %xmm30
-; AVX512BW-FAST-NEXT:    vporq %xmm28, %xmm30, %xmm28
-; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
-; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm3, %zmm28, %zmm3
+; AVX512BW-FAST-NEXT:    vpshufb %xmm1, %xmm5, %xmm3
+; AVX512BW-FAST-NEXT:    vmovdqa {{.*#+}} xmm7 = <4,128,u,u,u,7,128,5,128,u,u,u,8,128,6,128>
+; AVX512BW-FAST-NEXT:    vpshufb %xmm7, %xmm4, %xmm31
+; AVX512BW-FAST-NEXT:    vporq %xmm3, %xmm31, %xmm3
+; AVX512BW-FAST-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm4 = xmm4[u,6,7,2,3,u,u,u,8,9,4,5,u,u,u,10]
+; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm4, %zmm3, %zmm3
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [2,2,2,4,2,2,2,4,3,3,3,3,2,2,2,4,52,53,52,53,53,54,53,54,52,53,52,53,53,54,53,54]
-; AVX512BW-FAST-NEXT:    vpermi2w %zmm26, %zmm13, %zmm4
+; AVX512BW-FAST-NEXT:    vpermi2w %zmm29, %zmm11, %zmm4
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[0,1,0,1,4,5,4,5]
 ; AVX512BW-FAST-NEXT:    movabsq $290499906672591364, %rax # imm = 0x408102040810204
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k2
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm4, %zmm3 {%k2}
 ; AVX512BW-FAST-NEXT:    movabsq $-8714997200177740921, %rax # imm = 0x870E1C3870E1C387
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k2
-; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm3, %zmm24 {%k2}
-; AVX512BW-FAST-NEXT:    vpshufb %xmm5, %xmm16, %xmm3
-; AVX512BW-FAST-NEXT:    vpshufb %xmm0, %xmm14, %xmm0
+; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm3, %zmm25 {%k2}
+; AVX512BW-FAST-NEXT:    vpshufb %xmm6, %xmm15, %xmm3
+; AVX512BW-FAST-NEXT:    vpshufb %xmm0, %xmm13, %xmm0
 ; AVX512BW-FAST-NEXT:    vpor %xmm3, %xmm0, %xmm0
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm14[0],xmm16[0],xmm14[1],xmm16[1],xmm14[2],xmm16[2],xmm14[3],xmm16[3],xmm14[4],xmm16[4],xmm14[5],xmm16[5],xmm14[6],xmm16[6],xmm14[7],xmm16[7]
-; AVX512BW-FAST-NEXT:    vpshufb %xmm7, %xmm3, %xmm3
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm13[0],xmm15[0],xmm13[1],xmm15[1],xmm13[2],xmm15[2],xmm13[3],xmm15[3],xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
+; AVX512BW-FAST-NEXT:    vpshufb %xmm9, %xmm3, %xmm3
 ; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm0, %zmm3, %zmm0
-; AVX512BW-FAST-NEXT:    vpshufb %xmm2, %xmm19, %xmm2
-; AVX512BW-FAST-NEXT:    vpshufb %xmm31, %xmm18, %xmm3
+; AVX512BW-FAST-NEXT:    vpshufb %xmm2, %xmm18, %xmm2
+; AVX512BW-FAST-NEXT:    vpshufb %xmm19, %xmm17, %xmm3
 ; AVX512BW-FAST-NEXT:    vpor %xmm2, %xmm3, %xmm2
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm18[0],xmm19[0],xmm18[1],xmm19[1],xmm18[2],xmm19[2],xmm18[3],xmm19[3],xmm18[4],xmm19[4],xmm18[5],xmm19[5],xmm18[6],xmm19[6],xmm18[7],xmm19[7]
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm17[0],xmm18[0],xmm17[1],xmm18[1],xmm17[2],xmm18[2],xmm17[3],xmm18[3],xmm17[4],xmm18[4],xmm17[5],xmm18[5],xmm17[6],xmm18[6],xmm17[7],xmm18[7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[0,1,u,u,u,6,7,2,3,u,u,u,8,9,4,5]
 ; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm2, %zmm3, %zmm2
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm3 = zmm0[0,1,0,1,4,5,4,5]
@@ -8666,14 +8667,16 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    movabsq $871499720017774092, %rax # imm = 0xC183060C183060C
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k2
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm3, %zmm0 {%k2}
-; AVX512BW-FAST-NEXT:    vpshufb %xmm1, %xmm25, %xmm1
-; AVX512BW-FAST-NEXT:    vpshufb %xmm29, %xmm10, %xmm2
+; AVX512BW-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; AVX512BW-FAST-NEXT:    vpshufb %xmm1, %xmm3, %xmm1
+; AVX512BW-FAST-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; AVX512BW-FAST-NEXT:    vpshufb %xmm7, %xmm4, %xmm2
 ; AVX512BW-FAST-NEXT:    vpor %xmm1, %xmm2, %xmm1
-; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm10[0],xmm25[0],xmm10[1],xmm25[1],xmm10[2],xmm25[2],xmm10[3],xmm25[3],xmm10[4],xmm25[4],xmm10[5],xmm25[5],xmm10[6],xmm25[6],xmm10[7],xmm25[7]
-; AVX512BW-FAST-NEXT:    vpshufb %xmm8, %xmm2, %xmm2
+; AVX512BW-FAST-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX512BW-FAST-NEXT:    vpshufb %xmm12, %xmm2, %xmm2
 ; AVX512BW-FAST-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm1
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm26, %zmm2 # 32-byte Folded Reload
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm2 # 32-byte Folded Reload
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [1,1,0,0,1,1,0,0,4,5,1,1,1,1,0,0,18,18,18,20,18,18,18,20,19,19,19,19,18,18,18,20]
 ; AVX512BW-FAST-NEXT:    vpermw %zmm2, %zmm3, %zmm3
 ; AVX512BW-FAST-NEXT:    movabsq $4647998506761461824, %rax # imm = 0x4081020408102040
@@ -8683,29 +8686,26 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    kmovq %rax, %k2
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm0 {%k2}
 ; AVX512BW-FAST-NEXT:    vmovdqu64 (%rsp), %zmm1 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm12, %zmm1, %zmm1
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm11, %zmm3, %zmm3
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm10, %zmm1, %zmm1
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm14, %zmm24, %zmm3
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19,20,21],zero,zmm1[19],zero,zmm1[21,20,21,22],zero,zmm1[20],zero,zmm1[22,23,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,57],zero,zmm1[55],zero,zero,zero,zero,zmm1[58],zero,zmm1[56],zero,zero,zero,zero,zmm1[59],zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm3[21],zero,zmm3[19],zero,zero,zero,zero,zmm3[22],zero,zmm3[20],zero,zero,zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm3[55],zero,zero,zero,zero,zmm3[58],zero,zmm3[56],zero,zero,zero,zero,zmm3[59],zero,zmm3[57]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
 ; AVX512BW-FAST-NEXT:    vporq %zmm1, %zmm3, %zmm1
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm20, %zmm3, %zmm3
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22, %zmm3 # 32-byte Folded Reload
 ; AVX512BW-FAST-NEXT:    vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm23, %zmm4 # 32-byte Folded Reload
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm4[18],zero,zero,zero,zero,zmm4[21],zero,zmm4[19],zero,zero,zero,zero,zmm4[22],zero,zmm4[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm4[57],zero,zmm4[55],zero,zero,zero,zero,zmm4[58],zero,zmm4[56],zero,zero,zero,zero
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18],zero,zmm3[18,19,20,21],zero,zmm3[19],zero,zmm3[25,26,27,22],zero,zmm3[20],zero,zmm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57],zero,zmm3[55],zero,zmm3[53,54,55,58],zero,zmm3[56],zero,zmm3[60,61,58,59]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[2,3,2,3,6,7,6,7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm4[18],zero,zero,zero,zero,zmm4[21],zero,zmm4[19],zero,zero,zero,zero,zmm4[22],zero,zmm4[20,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zmm4[57],zero,zmm4[55],zero,zero,zero,zero,zmm4[58],zero,zmm4[56],zero,zero,zero,zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm4 = zmm4[2,3,2,3,6,7,6,7]
 ; AVX512BW-FAST-NEXT:    vporq %zmm3, %zmm4, %zmm3
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm3 {%k1}
-; AVX512BW-FAST-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm9, %zmm1, %zmm1
-; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm15, %zmm21, %zmm4
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm21, %zmm27, %zmm1
+; AVX512BW-FAST-NEXT:    vinserti64x4 $1, %ymm30, %zmm26, %zmm4
+; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm4[20],zero,zmm4[18],zero,zero,zero,zero,zmm4[21],zero,zmm4[19],zero,zero,zero,zero,zmm4[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm4[57],zero,zmm4[55],zero,zero,zero,zero,zmm4[58],zero,zmm4[56],zero,zero
 ; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm1 = zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,20],zero,zmm1[18],zero,zmm1[20,21,20,21],zero,zmm1[19],zero,zmm1[19,20,21,22],zero,zmm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,56,57,56,57],zero,zmm1[55],zero,zmm1[55,56,57,58],zero,zmm1[56],zero,zmm1[62,63]
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[2,3,2,3,6,7,6,7]
-; AVX512BW-FAST-NEXT:    vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zmm4[20],zero,zmm4[18],zero,zero,zero,zero,zmm4[21],zero,zmm4[19],zero,zero,zero,zero,zmm4[22,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,zero,zmm4[57],zero,zmm4[55],zero,zero,zero,zero,zmm4[58],zero,zmm4[56],zero,zero
 ; AVX512BW-FAST-NEXT:    vpermq {{.*#+}} zmm4 = zmm4[2,3,2,3,6,7,6,7]
 ; AVX512BW-FAST-NEXT:    vporq %zmm1, %zmm4, %zmm1
 ; AVX512BW-FAST-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [10,9,9,10,10,9,9,10,9,10,14,15,10,9,9,10,27,29,28,27,28,29,29,28,27,29,28,27,28,29,29,28]
@@ -8718,13 +8718,13 @@ define void @store_i8_stride7_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512BW-FAST-NEXT:    vmovdqu8 %zmm1, %zmm3 {%k1}
 ; AVX512BW-FAST-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm3, 128(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm17, 320(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm16, 320(%rax)
 ; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm0, (%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm24, 256(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm27, 192(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm6, 64(%rax)
-; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm22, 384(%rax)
-; AVX512BW-FAST-NEXT:    addq $200, %rsp
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm25, 256(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm28, 192(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm8, 64(%rax)
+; AVX512BW-FAST-NEXT:    vmovdqa64 %zmm20, 384(%rax)
+; AVX512BW-FAST-NEXT:    addq $72, %rsp
 ; AVX512BW-FAST-NEXT:    vzeroupper
 ; AVX512BW-FAST-NEXT:    retq
   %in.vec0 = load <64 x i8>, ptr %in.vecptr0, align 64

diff  --git a/llvm/test/CodeGen/X86/vector-shuffle-v192.ll b/llvm/test/CodeGen/X86/vector-shuffle-v192.ll
index dd1022151214e9..75c26f38eb9ed7 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-v192.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-v192.ll
@@ -214,23 +214,21 @@ define <64 x i8> @f2(ptr %p0) {
 ; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm4
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,3,5,9,11,15,17,21,23,27,29],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpternlogq $234, %zmm2, %zmm0, %zmm4
+; AVX512F-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
 ; AVX512F-NEXT:    vmovdqa 96(%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
-; AVX512F-NEXT:    vmovdqa 112(%rdi), %xmm2
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm2, %xmm2
-; AVX512F-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512F-NEXT:    vmovdqa 112(%rdi), %xmm4
+; AVX512F-NEXT:    vpshufb %xmm6, %xmm4, %xmm4
+; AVX512F-NEXT:    vpor %xmm0, %xmm4, %xmm0
 ; AVX512F-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
-; AVX512F-NEXT:    vmovdqa 80(%rdi), %xmm2
-; AVX512F-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
-; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm2
-; AVX512F-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
-; AVX512F-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; AVX512F-NEXT:    vmovdqa 80(%rdi), %xmm4
+; AVX512F-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
+; AVX512F-NEXT:    vpor %xmm1, %xmm3, %xmm1
 ; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
-; AVX512F-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm0
+; AVX512F-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: f2:
@@ -253,11 +251,9 @@ define <64 x i8> @f2(ptr %p0) {
 ; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512BW-NEXT:    vmovdqa 32(%rdi), %ymm2
 ; AVX512BW-NEXT:    vinserti64x4 $1, 128(%rdi), %zmm2, %zmm2
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = <u,u,u,u,u,u,u,u,u,u,u,3,5,9,11,15,1,5,7,11,13,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,3,5,9,11,15,1,5,7,11,13,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    movabsq $8998403163813888, %rax # imm = 0x1FF800001FF800
 ; AVX512BW-NEXT:    kmovq %rax, %k1
-; AVX512BW-NEXT:    vpshufb %zmm4, %zmm2, %zmm0 {%k1}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm0 {%k1} = zmm2[u,u,u,u,u,u,u,u,u,u,u,3,5,9,11,15,17,21,23,27,29,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,35,37,41,43,47,49,53,55,59,61,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vmovdqa 96(%rdi), %xmm2
 ; AVX512BW-NEXT:    vpshufb %xmm5, %xmm2, %xmm2
 ; AVX512BW-NEXT:    vmovdqa 112(%rdi), %xmm4
@@ -501,23 +497,21 @@ define <64 x i8> @f4(ptr %p0) {
 ; AVX512F-NEXT:    vmovdqa 128(%rdi), %ymm4
 ; AVX512F-NEXT:    vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,2,4,8,10,14,16,20,22,26,28],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
-; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
-; AVX512F-NEXT:    vpternlogq $234, %zmm2, %zmm0, %zmm4
+; AVX512F-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2
 ; AVX512F-NEXT:    vmovdqa 96(%rdi), %xmm0
 ; AVX512F-NEXT:    vpshufb %xmm5, %xmm0, %xmm0
-; AVX512F-NEXT:    vmovdqa 112(%rdi), %xmm2
-; AVX512F-NEXT:    vpshufb %xmm6, %xmm2, %xmm2
-; AVX512F-NEXT:    vpor %xmm0, %xmm2, %xmm0
+; AVX512F-NEXT:    vmovdqa 112(%rdi), %xmm4
+; AVX512F-NEXT:    vpshufb %xmm6, %xmm4, %xmm4
+; AVX512F-NEXT:    vpor %xmm0, %xmm4, %xmm0
 ; AVX512F-NEXT:    vinserti32x4 $2, %xmm0, %zmm0, %zmm0
-; AVX512F-NEXT:    vmovdqa 80(%rdi), %xmm2
-; AVX512F-NEXT:    vpshufb %xmm1, %xmm2, %xmm1
-; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm2
-; AVX512F-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
-; AVX512F-NEXT:    vpor %xmm1, %xmm2, %xmm1
+; AVX512F-NEXT:    vmovdqa 80(%rdi), %xmm4
+; AVX512F-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
+; AVX512F-NEXT:    vmovdqa 64(%rdi), %xmm4
+; AVX512F-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
+; AVX512F-NEXT:    vpor %xmm1, %xmm3, %xmm1
 ; AVX512F-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
 ; AVX512F-NEXT:    vshufi64x2 {{.*#+}} zmm0 = zmm1[0,1,2,3],zmm0[4,5,6,7]
-; AVX512F-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm0
+; AVX512F-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: f4:
@@ -540,11 +534,9 @@ define <64 x i8> @f4(ptr %p0) {
 ; AVX512BW-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
 ; AVX512BW-NEXT:    vmovdqa 32(%rdi), %ymm2
 ; AVX512BW-NEXT:    vinserti64x4 $1, 128(%rdi), %zmm2, %zmm2
-; AVX512BW-NEXT:    vbroadcasti64x4 {{.*#+}} zmm4 = <u,u,u,u,u,u,u,u,u,u,u,2,4,8,10,14,0,4,6,10,12,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,2,4,8,10,14,0,4,6,10,12,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-NEXT:    # zmm4 = mem[0,1,2,3,0,1,2,3]
 ; AVX512BW-NEXT:    movabsq $8998403163813888, %rax # imm = 0x1FF800001FF800
 ; AVX512BW-NEXT:    kmovq %rax, %k1
-; AVX512BW-NEXT:    vpshufb %zmm4, %zmm2, %zmm0 {%k1}
+; AVX512BW-NEXT:    vpshufb {{.*#+}} zmm0 {%k1} = zmm2[u,u,u,u,u,u,u,u,u,u,u,2,4,8,10,14,16,20,22,26,28,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,34,36,40,42,46,48,52,54,58,60,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512BW-NEXT:    vmovdqa 96(%rdi), %xmm2
 ; AVX512BW-NEXT:    vpshufb %xmm5, %xmm2, %xmm2
 ; AVX512BW-NEXT:    vmovdqa 112(%rdi), %xmm4

diff  --git a/llvm/test/CodeGen/X86/x86-interleaved-access.ll b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
index 216332943993e3..4f20a9d5db6ac5 100644
--- a/llvm/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
@@ -1054,8 +1054,8 @@ define void @interleaved_store_vf32_i8_stride3(<32 x i8> %a, <32 x i8> %b, <32 x
 ; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm3
 ; AVX512-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm2 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
-; AVX512-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm2 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
+; AVX512-NEXT:    # zmm2 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
 ; AVX512-NEXT:    vpshufb %zmm2, %zmm1, %zmm1
@@ -1252,8 +1252,8 @@ define void @interleaved_store_vf64_i8_stride3(<64 x i8> %a, <64 x i8> %b, <64 x
 ; AVX512-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
 ; AVX512-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm2
-; AVX512-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
-; AVX512-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; AVX512-NEXT:    vbroadcasti32x4 {{.*#+}} zmm3 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
+; AVX512-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
 ; AVX512-NEXT:    vpshufb %zmm3, %zmm2, %zmm2
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm4
 ; AVX512-NEXT:    vpshufb %zmm3, %zmm4, %zmm4
