[llvm] [X86][CodeGen] Support folding memory broadcast in X86InstrInfo::foldMemoryOperandImpl (PR #79761)

Shengchen Kan via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 30 08:25:38 PST 2024


https://github.com/KanRobert updated https://github.com/llvm/llvm-project/pull/79761

From e770fe01047500e681b4f4954f78786c2e4d8ac3 Mon Sep 17 00:00:00 2001
From: Shengchen Kan <shengchen.kan at intel.com>
Date: Sun, 28 Jan 2024 12:00:19 +0800
Subject: [PATCH 1/2] [X86][CodeGen] Support folding memory broadcast in
 X86InstrInfo::foldMemoryOperandImpl

---
 llvm/lib/Target/X86/X86InstrAVX512.td         |    2 +-
 llvm/lib/Target/X86/X86InstrFoldTables.cpp    |   21 +-
 llvm/lib/Target/X86/X86InstrFoldTables.h      |    6 +
 llvm/lib/Target/X86/X86InstrInfo.cpp          |  127 ++
 llvm/lib/Target/X86/X86InstrInfo.h            |    7 +
 .../vector-interleaved-load-i16-stride-7.ll   |  364 ++--
 .../vector-interleaved-store-i16-stride-5.ll  | 1570 ++++++++---------
 .../vector-interleaved-store-i8-stride-8.ll   | 1280 +++++++-------
 8 files changed, 1761 insertions(+), 1616 deletions(-)
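In effect, this teaches foldMemoryOperandImpl to fold a standalone AVX-512 broadcast load into its consumer as an EVEX embedded-broadcast memory operand, e.g. a vpbroadcastd (%rdi), %zmm1 feeding vpaddd %zmm1, %zmm0, %zmm0 can become vpaddd (%rdi){1to16}, %zmm0, %zmm0 (illustrative asm, not taken from the patch). Below is a toy C++ model of the width dispatch the patch adds; it is not LLVM code and all names in it are hypothetical:

    // Toy model of the dispatch added to foldMemoryOperandImpl: 16/32/64-bit
    // broadcasts are routed to foldMemoryBroadcast with their element width;
    // 8-bit and 32-bit-pair broadcasts bail out early because no instruction
    // currently fuses with them.
    #include <cstdio>

    enum class Bcast { B8, W16, D32, Q64, F32X2 };

    unsigned foldableBits(Bcast K) {
      switch (K) {
      case Bcast::B8:
      case Bcast::F32X2:
        return 0; // the real code returns nullptr here
      case Bcast::W16:
        return 16;
      case Bcast::D32:
        return 32;
      case Bcast::Q64:
        return 64;
      }
      return 0;
    }

    int main() {
      std::printf("VPBROADCASTD -> %u-bit fold\n", foldableBits(Bcast::D32));
      std::printf("VPBROADCASTB -> %u (not folded)\n", foldableBits(Bcast::B8));
    }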

diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index bb5e22c714279..b588f660e2744 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -1067,7 +1067,7 @@ multiclass avx512_broadcast_rm_split<bits<8> opc, string OpcodeStr,
                         MaskInfo.RC:$src0))],
                       DestInfo.ExeDomain>, T8, PD, EVEX, EVEX_K, Sched<[SchedRR]>;
 
-  let hasSideEffects = 0, mayLoad = 1 in
+  let hasSideEffects = 0, mayLoad = 1, isReMaterializable = 1, canFoldAsLoad = 1 in
   def rm : AVX512PI<opc, MRMSrcMem, (outs MaskInfo.RC:$dst),
                     (ins SrcInfo.ScalarMemOp:$src),
                     !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
diff --git a/llvm/lib/Target/X86/X86InstrFoldTables.cpp b/llvm/lib/Target/X86/X86InstrFoldTables.cpp
index 1d6df0f6ad129..cfea8acecc026 100644
--- a/llvm/lib/Target/X86/X86InstrFoldTables.cpp
+++ b/llvm/lib/Target/X86/X86InstrFoldTables.cpp
@@ -143,6 +143,23 @@ const X86FoldTableEntry *llvm::lookupFoldTable(unsigned RegOp, unsigned OpNum) {
   return lookupFoldTableImpl(FoldTable, RegOp);
 }
 
+const X86FoldTableEntry *
+llvm::lookupBroadcastFoldTable(unsigned RegOp, unsigned OpNum) {
+  ArrayRef<X86FoldTableEntry> FoldTable;
+  if (OpNum == 1)
+    FoldTable = ArrayRef(BroadcastTable1);
+  else if (OpNum == 2)
+    FoldTable = ArrayRef(BroadcastTable2);
+  else if (OpNum == 3)
+    FoldTable = ArrayRef(BroadcastTable3);
+  else if (OpNum == 4)
+    FoldTable = ArrayRef(BroadcastTable4);
+  else
+    return nullptr;
+
+  return lookupFoldTableImpl(FoldTable, RegOp);
+}
+
 namespace {
 
 // This class stores the memory unfolding tables. It is instantiated as a
@@ -288,8 +305,8 @@ struct X86BroadcastFoldTable {
 };
 } // namespace
 
-static bool matchBroadcastSize(const X86FoldTableEntry &Entry,
-                               unsigned BroadcastBits) {
+bool llvm::matchBroadcastSize(const X86FoldTableEntry &Entry,
+                              unsigned BroadcastBits) {
   switch (Entry.Flags & TB_BCAST_MASK) {
   case TB_BCAST_W:
   case TB_BCAST_SH:
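The hunk cuts the function off mid-switch; for context, the body of matchBroadcastSize (pre-existing code that this change merely exports) reads roughly as follows in the current tree, so treat this sketch as approximate:

    bool llvm::matchBroadcastSize(const X86FoldTableEntry &Entry,
                                  unsigned BroadcastBits) {
      // TB_BCAST_* encodes the element width the fused instruction broadcasts;
      // folding is only legal when it equals the width of the broadcast load.
      switch (Entry.Flags & TB_BCAST_MASK) {
      case TB_BCAST_W:
      case TB_BCAST_SH:
        return BroadcastBits == 16;
      case TB_BCAST_D:
      case TB_BCAST_SS:
        return BroadcastBits == 32;
      case TB_BCAST_Q:
      case TB_BCAST_SD:
        return BroadcastBits == 64;
      }
      return false;
    }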
diff --git a/llvm/lib/Target/X86/X86InstrFoldTables.h b/llvm/lib/Target/X86/X86InstrFoldTables.h
index a27d868537cbc..9c5dea48d2273 100644
--- a/llvm/lib/Target/X86/X86InstrFoldTables.h
+++ b/llvm/lib/Target/X86/X86InstrFoldTables.h
@@ -44,6 +44,11 @@ const X86FoldTableEntry *lookupTwoAddrFoldTable(unsigned RegOp);
 // operand OpNum.
 const X86FoldTableEntry *lookupFoldTable(unsigned RegOp, unsigned OpNum);
 
+// Look up the broadcast folding table entry for folding a broadcast with
+// operand OpNum.
+const X86FoldTableEntry *lookupBroadcastFoldTable(unsigned RegOp,
+                                                  unsigned OpNum);
+
 // Look up the memory unfolding table entry for this instruction.
 const X86FoldTableEntry *lookupUnfoldTable(unsigned MemOp);
 
@@ -52,6 +57,7 @@ const X86FoldTableEntry *lookupUnfoldTable(unsigned MemOp);
 const X86FoldTableEntry *lookupBroadcastFoldTableBySize(unsigned MemOp,
                                                         unsigned BroadcastBits);
 
+bool matchBroadcastSize(const X86FoldTableEntry &Entry, unsigned BroadcastBits);
 } // namespace llvm
 
 #endif
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index eceabe18154c8..2f399a1f40848 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -862,6 +862,28 @@ bool X86InstrInfo::isReallyTriviallyReMaterializable(
   case X86::MMX_MOVD64rm:
   case X86::MMX_MOVQ64rm:
   // AVX-512
+  case X86::VPBROADCASTBZ128rm:
+  case X86::VPBROADCASTBZ256rm:
+  case X86::VPBROADCASTBZrm:
+  case X86::VBROADCASTF32X2Z256rm:
+  case X86::VBROADCASTF32X2Zrm:
+  case X86::VBROADCASTI32X2Z128rm:
+  case X86::VBROADCASTI32X2Z256rm:
+  case X86::VBROADCASTI32X2Zrm:
+  case X86::VPBROADCASTWZ128rm:
+  case X86::VPBROADCASTWZ256rm:
+  case X86::VPBROADCASTWZrm:
+  case X86::VPBROADCASTDZ128rm:
+  case X86::VPBROADCASTDZ256rm:
+  case X86::VPBROADCASTDZrm:
+  case X86::VBROADCASTSSZ128rm:
+  case X86::VBROADCASTSSZ256rm:
+  case X86::VBROADCASTSSZrm:
+  case X86::VPBROADCASTQZ128rm:
+  case X86::VPBROADCASTQZ256rm:
+  case X86::VPBROADCASTQZrm:
+  case X86::VBROADCASTSDZ256rm:
+  case X86::VBROADCASTSDZrm:
   case X86::VMOVSSZrm:
   case X86::VMOVSSZrm_alt:
   case X86::VMOVSDZrm:
@@ -8067,6 +8089,39 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
     MOs.push_back(MachineOperand::CreateReg(0, false));
     break;
   }
+  case X86::VPBROADCASTBZ128rm:
+  case X86::VPBROADCASTBZ256rm:
+  case X86::VPBROADCASTBZrm:
+  case X86::VBROADCASTF32X2Z256rm:
+  case X86::VBROADCASTF32X2Zrm:
+  case X86::VBROADCASTI32X2Z128rm:
+  case X86::VBROADCASTI32X2Z256rm:
+  case X86::VBROADCASTI32X2Zrm:
+    // No instructions currently fuse with 8-bit or 32-bit x 2 broadcasts.
+    return nullptr;
+
+#define FOLD_BROADCAST(SIZE)                                                   \
+  MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands,          \
+             LoadMI.operands_begin() + NumOps);                                \
+  return foldMemoryBroadcast(MF, MI, Ops[0], MOs, InsertPt, /*Size=*/SIZE,     \
+                             Alignment, /*AllowCommute=*/true);
+  case X86::VPBROADCASTWZ128rm:
+  case X86::VPBROADCASTWZ256rm:
+  case X86::VPBROADCASTWZrm:
+    FOLD_BROADCAST(16);
+  case X86::VPBROADCASTDZ128rm:
+  case X86::VPBROADCASTDZ256rm:
+  case X86::VPBROADCASTDZrm:
+  case X86::VBROADCASTSSZ128rm:
+  case X86::VBROADCASTSSZ256rm:
+  case X86::VBROADCASTSSZrm:
+    FOLD_BROADCAST(32);
+  case X86::VPBROADCASTQZ128rm:
+  case X86::VPBROADCASTQZ256rm:
+  case X86::VPBROADCASTQZrm:
+  case X86::VBROADCASTSDZ256rm:
+  case X86::VBROADCASTSDZrm:
+    FOLD_BROADCAST(64);
   default: {
     if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
       return nullptr;
@@ -8081,6 +8136,78 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
                                /*Size=*/0, Alignment, /*AllowCommute=*/true);
 }
 
+MachineInstr *X86InstrInfo::foldMemoryBroadcast(
+    MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
+    ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
+    unsigned BitsSize, Align Alignment, bool AllowCommute) const {
+
+  if (auto *I = lookupBroadcastFoldTable(MI.getOpcode(), OpNum))
+    return matchBroadcastSize(*I, BitsSize)
+               ? FuseInst(MF, I->DstOp, OpNum, MOs, InsertPt, MI, *this)
+               : nullptr;
+
+  // TODO: Share code with foldMemoryOperandImpl for the commute
+  if (AllowCommute) {
+    unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex;
+    if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) {
+      bool HasDef = MI.getDesc().getNumDefs();
+      Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
+      Register Reg1 = MI.getOperand(CommuteOpIdx1).getReg();
+      Register Reg2 = MI.getOperand(CommuteOpIdx2).getReg();
+      bool Tied1 =
+          0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO);
+      bool Tied2 =
+          0 == MI.getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO);
+
+      // If either of the commutable operands are tied to the destination
+      // then we can not commute + fold.
+      if ((HasDef && Reg0 == Reg1 && Tied1) ||
+          (HasDef && Reg0 == Reg2 && Tied2))
+        return nullptr;
+
+      MachineInstr *CommutedMI =
+          commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
+      if (!CommutedMI) {
+        // Unable to commute.
+        return nullptr;
+      }
+      if (CommutedMI != &MI) {
+        // New instruction. We can't fold from this.
+        CommutedMI->eraseFromParent();
+        return nullptr;
+      }
+
+      // Attempt to fold with the commuted version of the instruction.
+      MachineInstr *NewMI = foldMemoryBroadcast(MF, MI, CommuteOpIdx2, MOs,
+                                                InsertPt, BitsSize, Alignment,
+                                                /*AllowCommute=*/false);
+      if (NewMI)
+        return NewMI;
+
+      // Folding failed again - undo the commute before returning.
+      MachineInstr *UncommutedMI =
+          commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
+      if (!UncommutedMI) {
+        // Unable to commute.
+        return nullptr;
+      }
+      if (UncommutedMI != &MI) {
+        // New instruction. It doesn't need to be kept.
+        UncommutedMI->eraseFromParent();
+        return nullptr;
+      }
+
+      // Return here to prevent duplicate fuse failure report.
+      return nullptr;
+    }
+  }
+
+  // No fusion
+  if (PrintFailedFusing && !MI.isCopy())
+    dbgs() << "We failed to fuse operand " << OpNum << " in " << MI;
+  return nullptr;
+}
+
 static SmallVector<MachineMemOperand *, 2>
 extractLoadMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {
   SmallVector<MachineMemOperand *, 2> LoadMMOs;
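Because FOLD_BROADCAST is defined in the middle of the switch, each group of case labels expands to an append-then-fold sequence. For example, the 32-bit group above expands to (one case label shown):

    case X86::VPBROADCASTDZrm: // ...plus the other 32-bit broadcast opcodes
      MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands,
                 LoadMI.operands_begin() + NumOps);
      return foldMemoryBroadcast(MF, MI, Ops[0], MOs, InsertPt, /*Size=*/32,
                                 Alignment, /*AllowCommute=*/true);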
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index 8512635eadaed..957d7ce63a5c2 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -643,6 +643,13 @@ class X86InstrInfo final : public X86GenInstrInfo {
                                         MachineBasicBlock::iterator InsertPt,
                                         unsigned Size, Align Alignment) const;
 
+  MachineInstr *foldMemoryBroadcast(MachineFunction &MF, MachineInstr &MI,
+                                    unsigned OpNum,
+                                    ArrayRef<MachineOperand> MOs,
+                                    MachineBasicBlock::iterator InsertPt,
+                                    unsigned BitsSize, Align Alignment,
+                                    bool AllowCommute) const;
+
   /// isFrameOperand - Return true and the FrameIndex if the specified
   /// operand and follow operands form a reference to the stack frame.
   bool isFrameOperand(const MachineInstr &MI, unsigned int Op,
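For readers skimming the diff: the commute handling in foldMemoryBroadcast follows the same retry pattern as the existing foldMemoryOperandImpl code it copies (hence the TODO about sharing). Stripped of the tied-operand checks, its control flow is roughly the sketch below, where fuse() stands in for the FuseInst call:

    // Simplified sketch; error paths and tied-operand checks elided.
    if (const X86FoldTableEntry *I = lookupBroadcastFoldTable(Opc, OpNum))
      return matchBroadcastSize(*I, BitsSize) ? fuse(*I) : nullptr;
    if (AllowCommute && commuteInstruction(MI, false, OpNum, OtherIdx)) {
      if (MachineInstr *NewMI =
              foldMemoryBroadcast(MF, MI, OtherIdx, MOs, InsertPt, BitsSize,
                                  Alignment, /*AllowCommute=*/false))
        return NewMI;                                 // folded after commuting
      commuteInstruction(MI, false, OpNum, OtherIdx); // undo; fold failed
    }
    return nullptr;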
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
index c562e9d9a3280..85dd0dcd0d4da 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-7.ll
@@ -14967,14 +14967,14 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512-FCP-NEXT:    vpermd %zmm4, %zmm1, %zmm0
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm3 = [8,1,12,5,12,5,14,15]
 ; AVX512-FCP-NEXT:    vpermd %zmm4, %zmm3, %zmm2
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm4, %zmm29
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm4, %zmm26
 ; AVX512-FCP-NEXT:    vpermd %zmm5, %zmm3, %zmm4
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm5, %zmm20
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm5, %zmm22
 ; AVX512-FCP-NEXT:    vmovdqa 480(%rdi), %ymm5
 ; AVX512-FCP-NEXT:    vmovdqa 448(%rdi), %ymm6
 ; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm3 = ymm6[0,1],ymm5[2],ymm6[3,4,5],ymm5[6],ymm6[7]
 ; AVX512-FCP-NEXT:    vmovdqa64 %ymm6, %ymm21
-; AVX512-FCP-NEXT:    vmovdqa %ymm5, %ymm12
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm5, %ymm23
 ; AVX512-FCP-NEXT:    vextracti128 $1, %ymm3, %xmm5
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4],xmm3[5],xmm5[6],xmm3[7]
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -14985,11 +14985,11 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512-FCP-NEXT:    vmovdqa 672(%rdi), %xmm7
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,0,1,14,15,12,13,14,15]
 ; AVX512-FCP-NEXT:    vpshufb %xmm4, %xmm7, %xmm6
-; AVX512-FCP-NEXT:    vmovdqa64 %xmm7, %xmm27
+; AVX512-FCP-NEXT:    vmovdqa64 %xmm7, %xmm20
 ; AVX512-FCP-NEXT:    vpbroadcastw 700(%rdi), %xmm7
-; AVX512-FCP-NEXT:    vpunpckhdq {{.*#+}} xmm30 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX512-FCP-NEXT:    vmovdqa64 192(%rdi), %ymm28
-; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm9 = ymm28[0,1,0,2]
+; AVX512-FCP-NEXT:    vpunpckhdq {{.*#+}} xmm28 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; AVX512-FCP-NEXT:    vmovdqa64 192(%rdi), %ymm31
+; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm9 = ymm31[0,1,0,2]
 ; AVX512-FCP-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-FCP-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
 ; AVX512-FCP-NEXT:    vmovdqa (%rdi), %ymm6
@@ -15006,8 +15006,8 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512-FCP-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
 ; AVX512-FCP-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; AVX512-FCP-NEXT:    vmovdqa 240(%rdi), %xmm15
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,0,1,6,7,8,9,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-FCP-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
+; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,0,1,6,7,8,9,18,19,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512-FCP-NEXT:    vpshufb %ymm5, %ymm0, %ymm0
 ; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm3 = ymm8[0,1,2],ymm7[3],ymm8[4,5],ymm7[6],ymm8[7]
 ; AVX512-FCP-NEXT:    vmovdqa64 %ymm8, %ymm18
 ; AVX512-FCP-NEXT:    vmovdqa64 %ymm7, %ymm19
@@ -15017,146 +15017,145 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512-FCP-NEXT:    vpshufb %ymm8, %ymm3, %ymm3
 ; AVX512-FCP-NEXT:    vpor %ymm0, %ymm3, %ymm0
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT:    vmovdqa 160(%rdi), %ymm11
-; AVX512-FCP-NEXT:    vmovdqa 128(%rdi), %ymm3
-; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm4 = ymm3[0,1],ymm11[2],ymm3[3,4,5],ymm11[6],ymm3[7]
+; AVX512-FCP-NEXT:    vmovdqa 160(%rdi), %ymm2
+; AVX512-FCP-NEXT:    vmovdqa 128(%rdi), %ymm12
+; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm4 = ymm12[0,1],ymm2[2],ymm12[3,4,5],ymm2[6],ymm12[7]
 ; AVX512-FCP-NEXT:    vextracti128 $1, %ymm4, %xmm6
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4],xmm4[5],xmm6[6],xmm4[7]
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
-; AVX512-FCP-NEXT:    vpshufb %xmm4, %xmm6, %xmm6
+; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
+; AVX512-FCP-NEXT:    vpshufb %xmm3, %xmm6, %xmm6
 ; AVX512-FCP-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm7
 ; AVX512-FCP-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29]
 ; AVX512-FCP-NEXT:    vpshufb %ymm6, %ymm9, %ymm9
 ; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm9 = ymm7[0,1,2,3,4,5,6],ymm9[7]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm10 = xmm15[0],xmm14[1],xmm15[2,3,4,5,6,7]
-; AVX512-FCP-NEXT:    vmovdqa %xmm15, %xmm5
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm7 = [2,3,0,1,14,15,14,15,8,9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT:    vpshufb %xmm7, %xmm10, %xmm10
+; AVX512-FCP-NEXT:    vmovdqa %xmm15, %xmm7
+; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm11 = [2,3,0,1,14,15,14,15,8,9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT:    vpshufb %xmm11, %xmm10, %xmm10
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512-FCP-NEXT:    vmovdqa64 %ymm21, %ymm13
-; AVX512-FCP-NEXT:    vmovdqa %ymm12, %ymm15
-; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm9 = ymm13[0,1,2],ymm12[3],ymm13[4,5],ymm12[6],ymm13[7]
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm23, %ymm15
+; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm9 = ymm13[0,1,2],ymm15[3],ymm13[4,5],ymm15[6],ymm13[7]
 ; AVX512-FCP-NEXT:    vextracti128 $1, %ymm9, %xmm10
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1],xmm10[2,3,4,5],xmm9[6],xmm10[7]
 ; AVX512-FCP-NEXT:    vpshufb %ymm8, %ymm9, %ymm8
-; AVX512-FCP-NEXT:    vpermd %zmm20, %zmm1, %zmm1
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm20, %zmm22
-; AVX512-FCP-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
+; AVX512-FCP-NEXT:    vpermd %zmm22, %zmm1, %zmm1
+; AVX512-FCP-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
 ; AVX512-FCP-NEXT:    vpor %ymm1, %ymm8, %ymm1
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT:    vmovdqa 608(%rdi), %ymm0
-; AVX512-FCP-NEXT:    vmovdqa 576(%rdi), %ymm1
-; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3,4,5],ymm0[6],ymm1[7]
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm1, %ymm23
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm0, %ymm26
+; AVX512-FCP-NEXT:    vmovdqa 608(%rdi), %ymm4
+; AVX512-FCP-NEXT:    vmovdqa 576(%rdi), %ymm5
+; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm8 = ymm5[0,1],ymm4[2],ymm5[3,4,5],ymm4[6],ymm5[7]
 ; AVX512-FCP-NEXT:    vextracti128 $1, %ymm8, %xmm9
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0,1,2,3],xmm9[4],xmm8[5],xmm9[6],xmm8[7]
 ; AVX512-FCP-NEXT:    vmovdqa64 640(%rdi), %ymm25
-; AVX512-FCP-NEXT:    vpshufb %xmm4, %xmm8, %xmm8
+; AVX512-FCP-NEXT:    vpshufb %xmm3, %xmm8, %xmm8
 ; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm0 = ymm25[0,1,0,2]
 ; AVX512-FCP-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
 ; AVX512-FCP-NEXT:    vpshufb %ymm6, %ymm0, %ymm6
 ; AVX512-FCP-NEXT:    vmovdqa64 %ymm0, %ymm21
 ; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm8[0,1,2,3,4,5,6],ymm6[7]
-; AVX512-FCP-NEXT:    vmovdqa 688(%rdi), %xmm4
-; AVX512-FCP-NEXT:    vmovdqa64 %xmm27, %xmm2
-; AVX512-FCP-NEXT:    vmovdqa64 %xmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm8 = xmm4[0],xmm2[1],xmm4[2,3,4,5,6,7]
-; AVX512-FCP-NEXT:    vpshufb %xmm7, %xmm8, %xmm8
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm7 = [2,6,9,u,13,u,u,u]
+; AVX512-FCP-NEXT:    vmovdqa 688(%rdi), %xmm3
+; AVX512-FCP-NEXT:    vmovdqa64 %xmm20, %xmm1
+; AVX512-FCP-NEXT:    vmovdqa64 %xmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm8 = xmm3[0],xmm1[1],xmm3[2,3,4,5,6,7]
+; AVX512-FCP-NEXT:    vpshufb %xmm11, %xmm8, %xmm8
+; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} ymm23 = [2,6,9,u,13,u,u,u]
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm8, %zmm6, %zmm0
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm18, %ymm1
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm18, %ymm11
 ; AVX512-FCP-NEXT:    vmovdqu64 %ymm18, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-FCP-NEXT:    vmovdqa64 %ymm19, %ymm0
 ; AVX512-FCP-NEXT:    vmovdqu64 %ymm19, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6,7]
+; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm0[0],ymm11[1],ymm0[2,3],ymm11[4],ymm0[5,6,7]
 ; AVX512-FCP-NEXT:    vextracti128 $1, %ymm6, %xmm8
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm8 = xmm6[0],xmm8[1],xmm6[2,3,4,5],xmm8[6],xmm6[7]
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm29, %zmm31
-; AVX512-FCP-NEXT:    vmovdqu64 %zmm29, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT:    vpermd %zmm29, %zmm7, %zmm9
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm26, %zmm29
+; AVX512-FCP-NEXT:    vmovdqu64 %zmm26, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT:    vpermd %zmm26, %zmm23, %zmm9
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,2,3,4,5,10,11,16,17,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512-FCP-NEXT:    vpshufb %ymm6, %ymm9, %ymm9
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm10 = [4,5,2,3,0,1,14,15,12,13,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512-FCP-NEXT:    vpshufb %ymm10, %ymm8, %ymm8
 ; AVX512-FCP-NEXT:    vpor %ymm9, %ymm8, %ymm8
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm8 = ymm3[0,1,2],ymm11[3],ymm3[4,5],ymm11[6],ymm3[7]
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm11, %ymm29
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm3, %ymm27
+; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm8 = ymm12[0,1,2],ymm2[3],ymm12[4,5],ymm2[6],ymm12[7]
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm2, %ymm27
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm12, %ymm26
 ; AVX512-FCP-NEXT:    vextracti128 $1, %ymm8, %xmm9
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm8 = xmm9[0],xmm8[1],xmm9[2,3,4,5],xmm8[6],xmm9[7]
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
-; AVX512-FCP-NEXT:    vpshufb %xmm9, %xmm8, %xmm8
+; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
+; AVX512-FCP-NEXT:    vpshufb %xmm2, %xmm8, %xmm8
+; AVX512-FCP-NEXT:    vmovdqa64 %xmm2, %xmm24
 ; AVX512-FCP-NEXT:    vinserti128 $1, %xmm8, %ymm0, %ymm8
-; AVX512-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm11 = [2,5,2,5,2,5,2,5]
-; AVX512-FCP-NEXT:    vpermd %ymm28, %ymm11, %ymm12
+; AVX512-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm18 = [2,5,2,5,2,5,2,5]
+; AVX512-FCP-NEXT:    vpermd %ymm31, %ymm18, %ymm12
 ; AVX512-FCP-NEXT:    vpshufhw {{.*#+}} ymm12 = ymm12[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
 ; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm12 = ymm8[0,1,2,3,4,5,6],ymm12[7]
-; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm14[0],xmm5[0],xmm14[1],xmm5[1],xmm14[2],xmm5[2],xmm14[3],xmm5[3]
-; AVX512-FCP-NEXT:    vmovdqa64 %xmm5, %xmm20
-; AVX512-FCP-NEXT:    vmovdqa64 %xmm14, %xmm18
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm5 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512-FCP-NEXT:    vpshufb %xmm5, %xmm8, %xmm14
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm14, %zmm12, %zmm3
-; AVX512-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm8 = xmm14[0],xmm7[0],xmm14[1],xmm7[1],xmm14[2],xmm7[2],xmm14[3],xmm7[3]
+; AVX512-FCP-NEXT:    vmovdqa64 %xmm7, %xmm20
+; AVX512-FCP-NEXT:    vmovdqa64 %xmm14, %xmm19
+; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm9 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512-FCP-NEXT:    vpshufb %xmm9, %xmm8, %xmm14
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm14, %zmm12, %zmm2
+; AVX512-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm12 = ymm15[0],ymm13[1],ymm15[2,3],ymm13[4],ymm15[5,6,7]
 ; AVX512-FCP-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT:    vmovdqa %ymm13, %ymm3
+; AVX512-FCP-NEXT:    vmovdqa %ymm13, %ymm2
 ; AVX512-FCP-NEXT:    vmovdqu %ymm13, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-FCP-NEXT:    vextracti128 $1, %ymm12, %xmm14
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm12 = xmm12[0],xmm14[1],xmm12[2,3,4,5],xmm14[6],xmm12[7]
 ; AVX512-FCP-NEXT:    vpshufb %ymm10, %ymm12, %ymm10
-; AVX512-FCP-NEXT:    vpermd %zmm22, %zmm7, %zmm7
+; AVX512-FCP-NEXT:    vpermd %zmm22, %zmm23, %zmm7
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm22, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512-FCP-NEXT:    vpshufb %ymm6, %ymm7, %ymm6
 ; AVX512-FCP-NEXT:    vpor %ymm6, %ymm10, %ymm6
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm23, %ymm6
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm26, %ymm7
-; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2],ymm7[3],ymm6[4,5],ymm7[6],ymm6[7]
+; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm4, %ymm23
+; AVX512-FCP-NEXT:    vmovdqa %ymm5, %ymm4
 ; AVX512-FCP-NEXT:    vextracti128 $1, %ymm6, %xmm7
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm6 = xmm7[0],xmm6[1],xmm7[2,3,4,5],xmm6[6],xmm7[7]
-; AVX512-FCP-NEXT:    vpshufb %xmm9, %xmm6, %xmm6
+; AVX512-FCP-NEXT:    vmovdqa64 %xmm24, %xmm5
+; AVX512-FCP-NEXT:    vpshufb %xmm5, %xmm6, %xmm6
 ; AVX512-FCP-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512-FCP-NEXT:    vpermd %ymm25, %ymm11, %ymm7
+; AVX512-FCP-NEXT:    vpermd %ymm25, %ymm18, %ymm7
 ; AVX512-FCP-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
 ; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5,6],ymm7[7]
-; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; AVX512-FCP-NEXT:    vmovdqa64 %xmm4, %xmm19
-; AVX512-FCP-NEXT:    vpshufb %xmm5, %xmm7, %xmm9
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm9, %zmm6, %zmm2
-; AVX512-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm0[0],ymm1[1],ymm0[2,3,4],ymm1[5],ymm0[6,7]
+; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX512-FCP-NEXT:    vmovdqa64 %xmm3, %xmm18
+; AVX512-FCP-NEXT:    vpshufb %xmm9, %xmm7, %xmm9
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm9, %zmm6, %zmm1
+; AVX512-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm0[0],ymm11[1],ymm0[2,3,4],ymm11[5],ymm0[6,7]
 ; AVX512-FCP-NEXT:    vextracti128 $1, %ymm6, %xmm9
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm6[1],xmm9[2],xmm6[3],xmm9[4,5,6,7]
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm1 = [10,3,6,15,12,13,6,15]
-; AVX512-FCP-NEXT:    vpermd %zmm31, %zmm1, %zmm10
+; AVX512-FCP-NEXT:    vpermd %zmm29, %zmm1, %zmm10
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,4,5,10,11,0,1,22,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512-FCP-NEXT:    vpshufb %ymm6, %ymm10, %ymm10
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm12 = [6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512-FCP-NEXT:    vpshufb %ymm12, %ymm9, %ymm9
 ; AVX512-FCP-NEXT:    vpor %ymm10, %ymm9, %ymm0
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm27, %ymm4
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm29, %ymm5
-; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm9 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6,7]
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm27, %ymm3
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm26, %ymm5
+; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm9 = ymm3[0],ymm5[1],ymm3[2,3],ymm5[4],ymm3[5,6,7]
 ; AVX512-FCP-NEXT:    vextracti128 $1, %ymm9, %xmm10
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm9 = xmm9[0],xmm10[1],xmm9[2,3,4,5],xmm10[6],xmm9[7]
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm10 = [0,1,4,5,2,3,0,1,14,15,12,13,12,13,14,15]
 ; AVX512-FCP-NEXT:    vpshufb %xmm10, %xmm9, %xmm9
 ; AVX512-FCP-NEXT:    vinserti128 $1, %xmm9, %ymm0, %ymm14
 ; AVX512-FCP-NEXT:    vpbroadcastd {{.*#+}} ymm11 = [18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25,18,19,24,25]
-; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm9 = ymm28[0,1,1,3]
+; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm9 = ymm31[0,1,1,3]
 ; AVX512-FCP-NEXT:    vpshufb %ymm11, %ymm9, %ymm13
 ; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm13 = ymm14[0,1,2,3,4,5,6],ymm13[7]
 ; AVX512-FCP-NEXT:    vpbroadcastd {{.*#+}} xmm14 = [12,13,10,11,12,13,10,11,12,13,10,11,12,13,10,11]
 ; AVX512-FCP-NEXT:    vpshufb %xmm14, %xmm8, %xmm8
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm8, %zmm13, %zmm0
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm8 = ymm15[0],ymm3[1],ymm15[2,3,4],ymm3[5],ymm15[6,7]
+; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm8 = ymm15[0],ymm2[1],ymm15[2,3,4],ymm2[5],ymm15[6,7]
 ; AVX512-FCP-NEXT:    vextracti128 $1, %ymm8, %xmm13
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm8 = xmm13[0],xmm8[1],xmm13[2],xmm8[3],xmm13[4,5,6,7]
 ; AVX512-FCP-NEXT:    vpshufb %ymm12, %ymm8, %ymm8
@@ -15164,9 +15163,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512-FCP-NEXT:    vpshufb %ymm6, %ymm12, %ymm6
 ; AVX512-FCP-NEXT:    vpor %ymm6, %ymm8, %ymm0
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm23, %ymm1
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm26, %ymm2
-; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5,6,7]
+; AVX512-FCP-NEXT:    vmovdqa %ymm4, %ymm1
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm23, %ymm2
+; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6,7]
 ; AVX512-FCP-NEXT:    vextracti128 $1, %ymm6, %xmm8
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2,3,4,5],xmm8[6],xmm6[7]
 ; AVX512-FCP-NEXT:    vpshufb %xmm10, %xmm6, %xmm6
@@ -15177,7 +15176,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512-FCP-NEXT:    vpshufb %xmm14, %xmm7, %xmm7
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm0
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm5[0],ymm4[1],ymm5[2,3,4],ymm4[5],ymm5[6,7]
+; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0],ymm5[1],ymm3[2,3,4],ymm5[5],ymm3[6,7]
 ; AVX512-FCP-NEXT:    vextracti128 $1, %ymm0, %xmm3
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm0[1],xmm3[2],xmm0[3],xmm3[4,5,6,7]
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
@@ -15202,7 +15201,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5,6],ymm4[7]
 ; AVX512-FCP-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512-FCP-NEXT:    kmovw %eax, %k1
-; AVX512-FCP-NEXT:    vinserti32x4 $2, %xmm30, %zmm4, %zmm17 {%k1}
+; AVX512-FCP-NEXT:    vinserti32x4 $2, %xmm28, %zmm4, %zmm17 {%k1}
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm17, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3,4],ymm1[5],ymm2[6,7]
 ; AVX512-FCP-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -15212,12 +15211,12 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512-FCP-NEXT:    vpshufb %ymm7, %ymm8, %ymm1
 ; AVX512-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
 ; AVX512-FCP-NEXT:    vpbroadcastw 680(%rdi), %xmm1
-; AVX512-FCP-NEXT:    vmovdqa64 %xmm19, %xmm9
-; AVX512-FCP-NEXT:    vpsrlq $48, %xmm19, %xmm2
+; AVX512-FCP-NEXT:    vmovdqa64 %xmm18, %xmm9
+; AVX512-FCP-NEXT:    vpsrlq $48, %xmm18, %xmm2
 ; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; AVX512-FCP-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm26
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,3,3,3,0,3,7,7]
-; AVX512-FCP-NEXT:    vpermd %ymm28, %ymm2, %ymm0
+; AVX512-FCP-NEXT:    vpermd %ymm31, %ymm2, %ymm0
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm12 = [0,1,6,7,4,5,6,7,8,9,0,1,6,7,8,9,16,17,22,23,20,21,22,23,24,25,16,17,22,23,24,25]
 ; AVX512-FCP-NEXT:    vpshufb %ymm12, %ymm0, %ymm6
 ; AVX512-FCP-NEXT:    vmovdqa64 128(%rdi), %zmm4
@@ -15227,8 +15226,8 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm0 = [0,1,0,1,6,7,8,9,14,15,14,15,14,15,14,15,16,17,16,17,22,23,24,25,30,31,30,31,30,31,30,31]
 ; AVX512-FCP-NEXT:    vpshufb %ymm0, %ymm8, %ymm8
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm6 = ymm8[0,1,2,3,4],ymm6[5,6,7],ymm8[8,9,10,11,12],ymm6[13,14,15]
-; AVX512-FCP-NEXT:    vmovdqa64 %xmm18, %xmm15
-; AVX512-FCP-NEXT:    vpsrld $16, %xmm18, %xmm8
+; AVX512-FCP-NEXT:    vmovdqa64 %xmm19, %xmm15
+; AVX512-FCP-NEXT:    vpsrld $16, %xmm19, %xmm8
 ; AVX512-FCP-NEXT:    vpunpckhwd {{.*#+}} xmm8 = xmm8[4],xmm5[4],xmm8[5],xmm5[5],xmm8[6],xmm5[6],xmm8[7],xmm5[7]
 ; AVX512-FCP-NEXT:    vinserti32x4 $2, %xmm8, %zmm6, %zmm3
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
@@ -15267,7 +15266,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm12 = [0,4,7,0,0,4,7,0]
 ; AVX512-FCP-NEXT:    # ymm12 = mem[0,1,0,1]
-; AVX512-FCP-NEXT:    vpermd %ymm28, %ymm12, %ymm0
+; AVX512-FCP-NEXT:    vpermd %ymm31, %ymm12, %ymm0
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm2 = [4,5,2,3,4,5,6,7,8,9,2,3,4,5,10,11,20,21,18,19,20,21,22,23,24,25,18,19,20,21,26,27]
 ; AVX512-FCP-NEXT:    vpshufb %ymm2, %ymm0, %ymm0
 ; AVX512-FCP-NEXT:    vmovdqa64 %ymm2, %ymm23
@@ -15648,12 +15647,12 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512-FCP-NEXT:    movw $-512, %ax # imm = 0xFE00
 ; AVX512-FCP-NEXT:    kmovw %eax, %k1
 ; AVX512-FCP-NEXT:    vmovdqa32 %zmm8, %zmm19 {%k1}
-; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
-; AVX512-FCP-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm8 # 64-byte Folded Reload
-; AVX512-FCP-NEXT:    vmovdqa32 %zmm9, %zmm8 {%k1}
 ; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512-FCP-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm2 # 64-byte Folded Reload
-; AVX512-FCP-NEXT:    vmovdqa32 %zmm10, %zmm2 {%k1}
+; AVX512-FCP-NEXT:    vmovdqa32 %zmm9, %zmm2 {%k1}
+; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512-FCP-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm8 # 64-byte Folded Reload
+; AVX512-FCP-NEXT:    vmovdqa32 %zmm10, %zmm8 {%k1}
 ; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm6 # 64-byte Reload
 ; AVX512-FCP-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm29, %zmm6 # 64-byte Folded Reload
 ; AVX512-FCP-NEXT:    vmovdqa32 %zmm12, %zmm6 {%k1}
@@ -15667,10 +15666,10 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm17, 64(%rsi)
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm11, 64(%rdx)
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm5, (%rdx)
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm8, 64(%rcx)
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, 64(%rcx)
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm19, (%rcx)
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm6, 64(%r8)
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, (%r8)
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm8, (%r8)
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm10, 64(%r9)
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm9, (%r9)
 ; AVX512-FCP-NEXT:    movq {{[0-9]+}}(%rsp), %rax
@@ -16563,20 +16562,19 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ;
 ; AVX512DQ-FCP-LABEL: load_i16_stride7_vf64:
 ; AVX512DQ-FCP:       # %bb.0:
-; AVX512DQ-FCP-NEXT:    subq $1304, %rsp # imm = 0x518
+; AVX512DQ-FCP-NEXT:    subq $1240, %rsp # imm = 0x4D8
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 512(%rdi), %zmm2
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%rdi), %zmm4
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} ymm17 = [2,5,9,u,12,u,u,u]
 ; AVX512DQ-FCP-NEXT:    vpermd %zmm4, %zmm17, %zmm0
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm3 = [8,1,12,5,12,5,14,15]
 ; AVX512DQ-FCP-NEXT:    vpermd %zmm4, %zmm3, %zmm1
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm4, %zmm27
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm4, %zmm11
 ; AVX512DQ-FCP-NEXT:    vpermd %zmm2, %zmm3, %zmm4
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, %zmm18
-; AVX512DQ-FCP-NEXT:    vmovdqa 480(%rdi), %ymm5
-; AVX512DQ-FCP-NEXT:    vmovdqa 448(%rdi), %ymm12
-; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm3 = ymm12[0,1],ymm5[2],ymm12[3,4,5],ymm5[6],ymm12[7]
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm5, %ymm22
+; AVX512DQ-FCP-NEXT:    vmovdqa 480(%rdi), %ymm12
+; AVX512DQ-FCP-NEXT:    vmovdqa 448(%rdi), %ymm14
+; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm3 = ymm14[0,1],ymm12[2],ymm14[3,4,5],ymm12[6],ymm14[7]
 ; AVX512DQ-FCP-NEXT:    vextracti128 $1, %ymm3, %xmm5
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4],xmm3[5],xmm5[6],xmm3[7]
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm3 = [0,1,14,15,12,13,10,11,8,9,128,128,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u]
@@ -16587,10 +16585,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vmovdqa 672(%rdi), %xmm7
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,2,3,4,5,6,7,0,1,14,15,12,13,14,15]
 ; AVX512DQ-FCP-NEXT:    vpshufb %xmm4, %xmm7, %xmm6
-; AVX512DQ-FCP-NEXT:    vmovdqa %xmm7, %xmm11
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm7, %xmm19
 ; AVX512DQ-FCP-NEXT:    vpbroadcastw 700(%rdi), %xmm7
-; AVX512DQ-FCP-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
-; AVX512DQ-FCP-NEXT:    vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT:    vpunpckhdq {{.*#+}} xmm26 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 192(%rdi), %ymm24
 ; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm9 = ymm24[0,1,0,2]
 ; AVX512DQ-FCP-NEXT:    vmovdqu %ymm9, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -16603,8 +16600,8 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm3, %ymm5, %ymm3
 ; AVX512DQ-FCP-NEXT:    vporq %ymm1, %ymm3, %ymm31
-; AVX512DQ-FCP-NEXT:    vmovdqa 224(%rdi), %xmm2
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm4, %xmm2, %xmm1
+; AVX512DQ-FCP-NEXT:    vmovdqa 224(%rdi), %xmm5
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm4, %xmm5, %xmm1
 ; AVX512DQ-FCP-NEXT:    vpbroadcastw 252(%rdi), %xmm3
 ; AVX512DQ-FCP-NEXT:    vpunpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
 ; AVX512DQ-FCP-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -16621,9 +16618,10 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpor %ymm0, %ymm1, %ymm0
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FCP-NEXT:    vmovdqa 160(%rdi), %ymm0
-; AVX512DQ-FCP-NEXT:    vmovdqa 128(%rdi), %ymm5
-; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm0[2],ymm5[3,4,5],ymm0[6],ymm5[7]
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm0, %ymm19
+; AVX512DQ-FCP-NEXT:    vmovdqa 128(%rdi), %ymm1
+; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm4 = ymm1[0,1],ymm0[2],ymm1[3,4,5],ymm0[6],ymm1[7]
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm1, %ymm25
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm0, %ymm29
 ; AVX512DQ-FCP-NEXT:    vextracti128 $1, %ymm4, %xmm6
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm6 = xmm4[0,1,2,3],xmm6[4],xmm4[5],xmm6[6],xmm4[7]
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,0,1,14,15,12,13,10,11,8,9,12,13,14,15]
@@ -16632,28 +16630,26 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpbroadcastd {{.*#+}} ymm6 = [22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29,22,23,28,29]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm6, %ymm9, %ymm9
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm9 = ymm7[0,1,2,3,4,5,6],ymm9[7]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm10 = xmm13[0],xmm2[1],xmm13[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm13, %xmm30
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm2, %xmm28
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm10 = xmm13[0],xmm5[1],xmm13[2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm13, %xmm27
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm7 = [2,3,0,1,14,15,14,15,8,9,10,11,12,13,14,15]
 ; AVX512DQ-FCP-NEXT:    vpshufb %xmm7, %xmm10, %xmm10
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm22, %ymm0
-; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm9 = ymm12[0,1,2],ymm0[3],ymm12[4,5],ymm0[6],ymm12[7]
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm22, %ymm23
+; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm9 = ymm14[0,1,2],ymm12[3],ymm14[4,5],ymm12[6],ymm14[7]
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm14, %ymm23
 ; AVX512DQ-FCP-NEXT:    vextracti128 $1, %ymm9, %xmm10
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm9 = xmm10[0],xmm9[1],xmm10[2,3,4,5],xmm9[6],xmm10[7]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm8, %ymm9, %ymm8
-; AVX512DQ-FCP-NEXT:    vpermd %zmm18, %zmm17, %zmm2
+; AVX512DQ-FCP-NEXT:    vpermd %zmm2, %zmm17, %zmm2
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm3, %ymm2, %ymm2
 ; AVX512DQ-FCP-NEXT:    vpor %ymm2, %ymm8, %ymm2
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FCP-NEXT:    vmovdqa 608(%rdi), %ymm0
 ; AVX512DQ-FCP-NEXT:    vmovdqa 576(%rdi), %ymm1
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm3 = ymm1[0,1],ymm0[2],ymm1[3,4,5],ymm0[6],ymm1[7]
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm1, %ymm25
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm0, %ymm17
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm1, %ymm21
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm0, %ymm30
 ; AVX512DQ-FCP-NEXT:    vextracti128 $1, %ymm3, %xmm8
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm8[4],xmm3[5],xmm8[6],xmm3[7]
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 640(%rdi), %ymm22
@@ -16661,57 +16657,56 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm0 = ymm22[0,1,0,2]
 ; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm6, %ymm0, %ymm6
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm0, %ymm26
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm0, %ymm17
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm4[0,1,2,3,4,5,6],ymm6[7]
 ; AVX512DQ-FCP-NEXT:    vmovdqa 688(%rdi), %xmm14
-; AVX512DQ-FCP-NEXT:    vmovdqa %xmm11, %xmm1
-; AVX512DQ-FCP-NEXT:    vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm4 = xmm14[0],xmm11[1],xmm14[2,3,4,5,6,7]
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm19, %xmm1
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm19, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm4 = xmm14[0],xmm1[1],xmm14[2,3,4,5,6,7]
 ; AVX512DQ-FCP-NEXT:    vpshufb %xmm7, %xmm4, %xmm7
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm4 = [2,6,9,u,13,u,u,u]
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm0
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm16, %ymm0
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm16, %ymm10
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 %ymm16, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm0[0],ymm15[1],ymm0[2,3],ymm15[4],ymm0[5,6,7]
-; AVX512DQ-FCP-NEXT:    vmovdqa %ymm15, %ymm3
+; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm10[0],ymm15[1],ymm10[2,3],ymm15[4],ymm10[5,6,7]
+; AVX512DQ-FCP-NEXT:    vmovdqa %ymm15, %ymm2
 ; AVX512DQ-FCP-NEXT:    vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-FCP-NEXT:    vextracti128 $1, %ymm6, %xmm7
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm7[1],xmm6[2,3,4,5],xmm7[6],xmm6[7]
-; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm27, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT:    vpermd %zmm27, %zmm4, %zmm8
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm11, %zmm28
+; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT:    vpermd %zmm11, %zmm4, %zmm8
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm7 = [128,128,128,128,128,128,128,128,128,128,2,3,4,5,10,11,16,17,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm7, %ymm8, %ymm8
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm9 = [4,5,2,3,0,1,14,15,12,13,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm9, %ymm6, %ymm6
-; AVX512DQ-FCP-NEXT:    vpor %ymm6, %ymm8, %ymm2
-; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm19, %ymm11
-; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm5[0,1,2],ymm11[3],ymm5[4,5],ymm11[6],ymm5[7]
+; AVX512DQ-FCP-NEXT:    vpor %ymm6, %ymm8, %ymm0
+; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm25, %ymm3
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm29, %ymm11
+; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm3[0,1,2],ymm11[3],ymm3[4,5],ymm11[6],ymm3[7]
 ; AVX512DQ-FCP-NEXT:    vextracti128 $1, %ymm6, %xmm8
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm6 = xmm8[0],xmm6[1],xmm8[2,3,4,5],xmm6[6],xmm8[7]
-; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm2, %xmm6, %xmm6
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm2, %xmm16
+; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,2,3,0,1,14,15,12,13,10,11,12,13,14,15]
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm0, %xmm6, %xmm6
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm0, %xmm25
 ; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX512DQ-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm10 = [2,5,2,5,2,5,2,5]
-; AVX512DQ-FCP-NEXT:    vpermd %ymm24, %ymm10, %ymm13
+; AVX512DQ-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm16 = [2,5,2,5,2,5,2,5]
+; AVX512DQ-FCP-NEXT:    vpermd %ymm24, %ymm16, %ymm13
 ; AVX512DQ-FCP-NEXT:    vpshufhw {{.*#+}} ymm13 = ymm13[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm13 = ymm6[0,1,2,3,4,5,6],ymm13[7]
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm30, %xmm2
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm28, %xmm8
-; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3]
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm30, %xmm21
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm28, %xmm19
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm27, %xmm0
+; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm5, %xmm19
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm8 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX512DQ-FCP-NEXT:    vpshufb %xmm8, %xmm6, %xmm15
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm15, %zmm13, %zmm2
-; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT:    vmovdqa %ymm12, %ymm2
-; AVX512DQ-FCP-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm23, %ymm12
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm15, %zmm13, %zmm0
+; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm23, %ymm0
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 %ymm23, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm13 = ymm12[0],ymm2[1],ymm12[2,3],ymm2[4],ymm12[5,6,7]
+; AVX512DQ-FCP-NEXT:    vmovdqu %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm13 = ymm12[0],ymm0[1],ymm12[2,3],ymm0[4],ymm12[5,6,7]
 ; AVX512DQ-FCP-NEXT:    vextracti128 $1, %ymm13, %xmm15
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0],xmm15[1],xmm13[2,3,4,5],xmm15[6],xmm13[7]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm9, %ymm13, %ymm9
@@ -16720,15 +16715,16 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm7, %ymm4, %ymm4
 ; AVX512DQ-FCP-NEXT:    vpor %ymm4, %ymm9, %ymm4
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm17, %ymm4
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm25, %ymm7
-; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm4 = ymm7[0,1,2],ymm4[3],ymm7[4,5],ymm4[6],ymm7[7]
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm21, %ymm5
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm30, %ymm4
+; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2],ymm4[3],ymm5[4,5],ymm4[6],ymm5[7]
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm30, %ymm21
 ; AVX512DQ-FCP-NEXT:    vextracti128 $1, %ymm4, %xmm7
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2,3,4,5],xmm4[6],xmm7[7]
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm16, %xmm7
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm25, %xmm7
 ; AVX512DQ-FCP-NEXT:    vpshufb %xmm7, %xmm4, %xmm4
 ; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512DQ-FCP-NEXT:    vpermd %ymm22, %ymm10, %ymm7
+; AVX512DQ-FCP-NEXT:    vpermd %ymm22, %ymm16, %ymm7
 ; AVX512DQ-FCP-NEXT:    vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,5,4,7,8,9,10,11,12,13,12,15]
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm4 = ymm4[0,1,2,3,4,5,6],ymm7[7]
 ; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm7 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3]
@@ -16736,18 +16732,18 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpshufb %xmm8, %xmm7, %xmm8
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm8, %zmm4, %zmm1
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm4 = ymm0[0],ymm3[1],ymm0[2,3,4],ymm3[5],ymm0[6,7]
+; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm4 = ymm10[0],ymm2[1],ymm10[2,3,4],ymm2[5],ymm10[6,7]
 ; AVX512DQ-FCP-NEXT:    vextracti128 $1, %ymm4, %xmm8
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm4 = xmm8[0],xmm4[1],xmm8[2],xmm4[3],xmm8[4,5,6,7]
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} ymm16 = [10,3,6,15,12,13,6,15]
-; AVX512DQ-FCP-NEXT:    vpermd %zmm27, %zmm16, %zmm9
+; AVX512DQ-FCP-NEXT:    vpermd %zmm28, %zmm16, %zmm9
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,4,5,10,11,0,1,22,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm8, %ymm9, %ymm9
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm13 = [6,7,4,5,2,3,0,1,14,15,128,128,128,128,128,128,128,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm13, %ymm4, %ymm4
-; AVX512DQ-FCP-NEXT:    vpor %ymm4, %ymm9, %ymm0
-; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm4 = ymm11[0],ymm5[1],ymm11[2,3],ymm5[4],ymm11[5,6,7]
+; AVX512DQ-FCP-NEXT:    vpor %ymm4, %ymm9, %ymm1
+; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm4 = ymm11[0],ymm3[1],ymm11[2,3],ymm3[4],ymm11[5,6,7]
 ; AVX512DQ-FCP-NEXT:    vextracti128 $1, %ymm4, %xmm9
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm9[1],xmm4[2,3,4,5],xmm9[6],xmm4[7]
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm9 = [0,1,4,5,2,3,0,1,14,15,12,13,12,13,14,15]
@@ -16759,9 +16755,8 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm14 = ymm15[0,1,2,3,4,5,6],ymm14[7]
 ; AVX512DQ-FCP-NEXT:    vpbroadcastd {{.*#+}} xmm15 = [12,13,10,11,12,13,10,11,12,13,10,11,12,13,10,11]
 ; AVX512DQ-FCP-NEXT:    vpshufb %xmm15, %xmm6, %xmm6
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm6, %zmm14, %zmm0
-; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm12[0],ymm2[1],ymm12[2,3,4],ymm2[5],ymm12[6,7]
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm6, %zmm14, %zmm23
+; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm12[0],ymm0[1],ymm12[2,3,4],ymm0[5],ymm12[6,7]
 ; AVX512DQ-FCP-NEXT:    vextracti128 $1, %ymm6, %xmm14
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm6 = xmm14[0],xmm6[1],xmm14[2],xmm6[3],xmm14[4,5,6,7]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm13, %ymm6, %ymm6
@@ -16769,9 +16764,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm8, %ymm13, %ymm8
 ; AVX512DQ-FCP-NEXT:    vpor %ymm6, %ymm8, %ymm0
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm25, %ymm2
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm17, %ymm12
-; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm12[0],ymm2[1],ymm12[2,3],ymm2[4],ymm12[5,6,7]
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm21, %ymm12
+; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm6 = ymm12[0],ymm5[1],ymm12[2,3],ymm5[4],ymm12[5,6,7]
+; AVX512DQ-FCP-NEXT:    vmovdqa %ymm5, %ymm2
 ; AVX512DQ-FCP-NEXT:    vextracti128 $1, %ymm6, %xmm8
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2,3,4,5],xmm8[6],xmm6[7]
 ; AVX512DQ-FCP-NEXT:    vpshufb %xmm9, %xmm6, %xmm6
@@ -16781,7 +16776,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm8 = ymm8[0,1,2,3,4,5,6],ymm9[7]
 ; AVX512DQ-FCP-NEXT:    vpshufb %xmm15, %xmm7, %xmm7
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm7, %zmm8, %zmm29
-; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0],ymm5[1],ymm11[2,3,4],ymm5[5],ymm11[6,7]
+; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0],ymm3[1],ymm11[2,3,4],ymm3[5],ymm11[6,7]
 ; AVX512DQ-FCP-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4,5,6,7]
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm0 = [0,1,6,7,4,5,2,3,0,1,14,15,12,13,14,15]
@@ -16791,22 +16786,22 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm8, %ymm4, %ymm4
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5,6],ymm4[7]
 ; AVX512DQ-FCP-NEXT:    vpbroadcastw 232(%rdi), %xmm4
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm21, %xmm5
-; AVX512DQ-FCP-NEXT:    vpsrlq $48, %xmm21, %xmm7
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm27, %xmm5
+; AVX512DQ-FCP-NEXT:    vpsrlq $48, %xmm27, %xmm7
 ; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1],xmm4[2],xmm7[2],xmm4[3],xmm7[3]
-; AVX512DQ-FCP-NEXT:    vinserti32x4 $2, %xmm4, %zmm1, %zmm27
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm26, %ymm1
+; AVX512DQ-FCP-NEXT:    vinserti32x4 $2, %xmm4, %zmm1, %zmm28
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm17, %ymm1
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm8, %ymm1, %ymm1
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 576(%rdi), %zmm21
-; AVX512DQ-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} ymm28 = [3,6,10,13,3,6,10,13]
-; AVX512DQ-FCP-NEXT:    # ymm28 = mem[0,1,2,3,0,1,2,3]
-; AVX512DQ-FCP-NEXT:    vpermd %zmm21, %zmm28, %zmm3
+; AVX512DQ-FCP-NEXT:    vbroadcasti32x4 {{.*#+}} ymm27 = [3,6,10,13,3,6,10,13]
+; AVX512DQ-FCP-NEXT:    # ymm27 = mem[0,1,2,3,0,1,2,3]
+; AVX512DQ-FCP-NEXT:    vpermd %zmm21, %zmm27, %zmm3
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm9 = [0,1,2,3,0,1,6,7,8,9,14,15,12,13,14,15,16,17,18,19,16,17,22,23,24,25,30,31,u,u,u,u]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm9, %ymm3, %ymm3
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5,6],ymm1[7]
 ; AVX512DQ-FCP-NEXT:    movw $992, %ax # imm = 0x3E0
 ; AVX512DQ-FCP-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FCP-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm20 {%k1} # 16-byte Folded Reload
+; AVX512DQ-FCP-NEXT:    vinserti32x4 $2, %xmm26, %zmm1, %zmm20 {%k1}
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm20, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm1 = ymm12[0],ymm2[1],ymm12[2,3,4],ymm2[5],ymm12[6,7]
 ; AVX512DQ-FCP-NEXT:    vextracti128 $1, %ymm1, %xmm2
@@ -16831,7 +16826,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm13 = [0,1,0,1,6,7,8,9,14,15,14,15,14,15,14,15,16,17,16,17,22,23,24,25,30,31,30,31,30,31,30,31]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm13, %ymm6, %ymm6
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm3 = ymm6[0,1,2,3,4],ymm3[5,6,7],ymm6[8,9,10,11,12],ymm3[13,14,15]
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm19, %xmm12
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm19, %xmm11
 ; AVX512DQ-FCP-NEXT:    vpsrld $16, %xmm19, %xmm6
 ; AVX512DQ-FCP-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
 ; AVX512DQ-FCP-NEXT:    vinserti32x4 $2, %xmm6, %zmm3, %zmm3
@@ -16852,9 +16847,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm6, %ymm14, %ymm14
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm3 = xmm14[0,1,2],xmm3[3,4,5,6],xmm14[7]
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQ-FCP-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm11 # 32-byte Reload
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm8, %ymm11, %ymm8
-; AVX512DQ-FCP-NEXT:    vpermd %zmm2, %zmm28, %zmm14
+; AVX512DQ-FCP-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Reload
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm8, %ymm12, %ymm8
+; AVX512DQ-FCP-NEXT:    vpermd %zmm2, %zmm27, %zmm14
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm9, %ymm14, %ymm9
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3,4,5,6],ymm8[7]
 ; AVX512DQ-FCP-NEXT:    vinserti32x4 $2, {{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm31 {%k1} # 16-byte Folded Reload
@@ -16879,7 +16874,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpermd %zmm2, %zmm8, %zmm2
 ; AVX512DQ-FCP-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[4,5,2,3,4,5,10,11,12,13,12,13,12,13,12,13,20,21,18,19,20,21,26,27,28,29,28,29,28,29,28,29]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm0 = ymm2[0,1,2,3,4],ymm0[5,6,7],ymm2[8,9,10,11,12],ymm0[13,14,15]
-; AVX512DQ-FCP-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm12[4],xmm5[4],xmm12[5],xmm5[5],xmm12[6],xmm5[6],xmm12[7],xmm5[7]
+; AVX512DQ-FCP-NEXT:    vpunpckhwd {{.*#+}} xmm2 = xmm11[4],xmm5[4],xmm11[5],xmm5[5],xmm11[6],xmm5[6],xmm11[7],xmm5[7]
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} xmm19 = [8,9,6,7,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm19, %xmm4
 ; AVX512DQ-FCP-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
@@ -16950,7 +16945,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm10 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,2,3,16,17,22,23,24,25,30,31,128,128,128,128,128,128,128,128]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm10, %ymm6, %ymm6
 ; AVX512DQ-FCP-NEXT:    vpor %ymm6, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT:    vpermd %zmm14, %zmm28, %zmm6
+; AVX512DQ-FCP-NEXT:    vpermd %zmm14, %zmm27, %zmm6
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm12, %ymm6, %ymm6
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm6[6,7]
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm25 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0,0,0,0,0,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535]
@@ -16968,7 +16963,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpermd %zmm30, %zmm3, %zmm2
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm10, %ymm2, %ymm2
 ; AVX512DQ-FCP-NEXT:    vpor %ymm2, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT:    vpermd %zmm4, %zmm28, %zmm0
+; AVX512DQ-FCP-NEXT:    vpermd %zmm4, %zmm27, %zmm0
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm12, %ymm0, %ymm0
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
@@ -17009,10 +17004,9 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpshufb %xmm12, %xmm2, %xmm2
 ; AVX512DQ-FCP-NEXT:    vpor %ymm6, %ymm2, %ymm2
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
-; AVX512DQ-FCP-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm1 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
-; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512DQ-FCP-NEXT:    vpternlogq $184, {{[-0-9]+}}(%r{{[sb]}}p), %zmm25, %zmm23 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm23 {%k1}
+; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm23, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FCP-NEXT:    vmovdqa 864(%rdi), %ymm1
 ; AVX512DQ-FCP-NEXT:    vmovdqa 832(%rdi), %ymm13
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm13[0,1],ymm1[2],ymm13[3,4,5],ymm1[6],ymm13[7]
@@ -17045,13 +17039,13 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
 ; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} ymm21 = [0,3,7,10,14,u,u,u]
-; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm28 # 64-byte Reload
-; AVX512DQ-FCP-NEXT:    vpermd %zmm28, %zmm21, %zmm3
+; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm27 # 64-byte Reload
+; AVX512DQ-FCP-NEXT:    vpermd %zmm27, %zmm21, %zmm3
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm1 = [u,u,u,u,u,u,u,u,0,1,6,7,8,9,14,15,16,17,22,23,20,21,22,23,16,17,22,23,24,25,30,31]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm1, %ymm3, %ymm3
 ; AVX512DQ-FCP-NEXT:    vmovdqa %ymm1, %ymm5
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3,4,5,6,7]
-; AVX512DQ-FCP-NEXT:    vpternlogq $184, %zmm0, %zmm25, %zmm27
+; AVX512DQ-FCP-NEXT:    vpternlogq $184, %zmm0, %zmm25, %zmm28
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm11[0,1,2],ymm8[3],ymm11[4,5],ymm8[6],ymm11[7]
 ; AVX512DQ-FCP-NEXT:    vmovdqa %ymm11, %ymm12
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm8, %ymm19
@@ -17073,7 +17067,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpshufb %xmm10, %xmm3, %xmm3
 ; AVX512DQ-FCP-NEXT:    vpor %ymm2, %ymm3, %ymm2
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512DQ-FCP-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm27 {%k1}
+; AVX512DQ-FCP-NEXT:    vinserti32x8 $1, %ymm0, %zmm0, %zmm28 {%k1}
 ; AVX512DQ-FCP-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm14 # 32-byte Reload
 ; AVX512DQ-FCP-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm14[0,1],ymm2[2],ymm14[3,4],ymm2[5],ymm14[6,7]
@@ -17117,7 +17111,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm2, %xmm24
 ; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} ymm22 = [0,4,7,11,14,u,u,u]
-; AVX512DQ-FCP-NEXT:    vpermd %zmm28, %zmm22, %zmm3
+; AVX512DQ-FCP-NEXT:    vpermd %zmm27, %zmm22, %zmm3
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm9 = [u,u,u,u,u,u,u,u,2,3,4,5,10,11,12,13,18,19,18,19,18,19,18,19,18,19,20,21,26,27,28,29]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm9, %ymm3, %ymm3
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3,4,5,6,7]
@@ -17181,7 +17175,7 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,4,6,7]
 ; AVX512DQ-FCP-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,4,8,11,15,u,u,u]
-; AVX512DQ-FCP-NEXT:    vpermd %zmm28, %zmm5, %zmm2
+; AVX512DQ-FCP-NEXT:    vpermd %zmm27, %zmm5, %zmm2
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm29, %ymm15
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm15, %ymm2, %ymm2
 ; AVX512DQ-FCP-NEXT:    vpblendd {{.*#+}} ymm12 = ymm1[0,1],ymm2[2,3,4,5,6,7]
@@ -17262,14 +17256,14 @@ define void @load_i16_stride7_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
 ; AVX512DQ-FCP-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm2 # 64-byte Reload
 ; AVX512DQ-FCP-NEXT:    vmovaps %zmm2, (%r8)
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm26, 64(%r9)
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm27, (%r9)
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm28, (%r9)
 ; AVX512DQ-FCP-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm9, 64(%rax)
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, (%rax)
 ; AVX512DQ-FCP-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, 64(%rax)
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm12, (%rax)
-; AVX512DQ-FCP-NEXT:    addq $1304, %rsp # imm = 0x518
+; AVX512DQ-FCP-NEXT:    addq $1240, %rsp # imm = 0x4D8
 ; AVX512DQ-FCP-NEXT:    vzeroupper
 ; AVX512DQ-FCP-NEXT:    retq
 ;
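(Illustrative note, not part of the patch: the i16-stride-5 diffs below show the headline effect of the new fold. A vpbroadcastq-from-memory followed by vpandn collapses into a single vpandnq with an embedded {1to4} broadcast operand, which frees a register and, in the larger functions above, shrinks the stack frame, e.g. addq $1304 becoming addq $1240. The reduced IR sketch below exposes the same instruction shape when compiled with llc -mtriple=x86_64 -mattr=+avx512vl; a case this small may already be folded during instruction selection, whereas the patch extends the fold to X86InstrInfo::foldMemoryOperandImpl, the hook register allocation uses when folding reloads and rematerialized broadcasts.

; Hypothetical reduced example: dst = ~mask & broadcast(*p).
; With AVX512VL this can select to a single instruction with an
; embedded broadcast memory operand:
;   vpandnq (%rdi){1to4}, %ymm0, %ymm0
define <4 x i64> @andn_bcast(ptr %p, <4 x i64> %mask) {
  ; Scalar load, then splat to all four lanes.
  %s = load i64, ptr %p
  %v = insertelement <4 x i64> poison, i64 %s, i64 0
  %b = shufflevector <4 x i64> %v, <4 x i64> poison, <4 x i32> zeroinitializer
  ; and-not pattern that matches VPANDN: the broadcast feeds the
  ; memory-capable (non-inverted) source.
  %not = xor <4 x i64> %mask, <i64 -1, i64 -1, i64 -1, i64 -1>
  %r = and <4 x i64> %not, %b
  ret <4 x i64> %r
}
)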
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
index 1881d6d124608..ca3cd2a11b666 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
@@ -1763,9 +1763,8 @@ define void @store_i16_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-NEXT:    vpternlogq $226, %ymm5, %ymm8, %ymm10
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm5
 ; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm7[0,1,2,3],zmm5[4,5,6,7]
-; AVX512-NEXT:    vpbroadcastq 16(%r8), %ymm7
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm8 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512-NEXT:    vpandn %ymm7, %ymm8, %ymm7
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm7 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512-NEXT:    vpandnq 16(%r8){1to4}, %ymm7, %ymm7
 ; AVX512-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,ymm4[12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[16,17],zero,zero,zero,zero,zero,zero
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm4
 ; AVX512-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm4
@@ -1841,9 +1840,8 @@ define void @store_i16_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FCP-NEXT:    vpternlogq $226, %ymm5, %ymm8, %ymm10
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm5
 ; AVX512-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm7[0,1,2,3],zmm5[4,5,6,7]
-; AVX512-FCP-NEXT:    vpbroadcastq 16(%r8), %ymm7
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm8 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512-FCP-NEXT:    vpandn %ymm7, %ymm8, %ymm7
+; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm7 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512-FCP-NEXT:    vpandnq 16(%r8){1to4}, %ymm7, %ymm7
 ; AVX512-FCP-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,ymm4[12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[16,17],zero,zero,zero,zero,zero,zero
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm4
 ; AVX512-FCP-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm4
@@ -1918,9 +1916,8 @@ define void @store_i16_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-NEXT:    vpternlogq $226, %ymm5, %ymm8, %ymm10
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm5
 ; AVX512DQ-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm7[0,1,2,3],zmm5[4,5,6,7]
-; AVX512DQ-NEXT:    vpbroadcastq 16(%r8), %ymm7
-; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm8 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512DQ-NEXT:    vpandn %ymm7, %ymm8, %ymm7
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm7 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512DQ-NEXT:    vpandnq 16(%r8){1to4}, %ymm7, %ymm7
 ; AVX512DQ-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,ymm4[12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[16,17],zero,zero,zero,zero,zero,zero
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm4
 ; AVX512DQ-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm4
@@ -1996,9 +1993,8 @@ define void @store_i16_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FCP-NEXT:    vpternlogq $226, %ymm5, %ymm8, %ymm10
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm10, %zmm0, %zmm5
 ; AVX512DQ-FCP-NEXT:    vshufi64x2 {{.*#+}} zmm5 = zmm7[0,1,2,3],zmm5[4,5,6,7]
-; AVX512DQ-FCP-NEXT:    vpbroadcastq 16(%r8), %ymm7
-; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm8 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT:    vpandn %ymm7, %ymm8, %ymm7
+; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm7 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512DQ-FCP-NEXT:    vpandnq 16(%r8){1to4}, %ymm7, %ymm7
 ; AVX512DQ-FCP-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,ymm4[12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm4[16,17],zero,zero,zero,zero,zero,zero
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm4
 ; AVX512DQ-FCP-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm4
@@ -3438,63 +3434,62 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-NEXT:    vpternlogq $226, %zmm2, %zmm7, %zmm4
 ; AVX512-NEXT:    vpbroadcastq (%r8), %ymm2
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm8 = ymm9[0,1,1,1]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm8, %zmm2, %zmm12
-; AVX512-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm12
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[0,1,2,1,4,5,6,5]
-; AVX512-NEXT:    vprolq $16, %ymm3, %ymm4
-; AVX512-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0,1],ymm2[2],ymm4[3],ymm2[4],ymm4[5,6],ymm2[7],ymm4[8,9],ymm2[10],ymm4[11],ymm2[12],ymm4[13,14],ymm2[15]
-; AVX512-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm4 = ymm18[2,3,2,3,6,7,6,7]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm8, %zmm2, %zmm2
+; AVX512-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm2
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm4 = ymm18[0,1,2,1,4,5,6,5]
+; AVX512-NEXT:    vprolq $16, %ymm3, %ymm8
+; AVX512-NEXT:    vpblendw {{.*#+}} ymm4 = ymm8[0,1],ymm4[2],ymm8[3],ymm4[4],ymm8[5,6],ymm4[7],ymm8[8,9],ymm4[10],ymm8[11],ymm4[12],ymm8[13,14],ymm4[15]
+; AVX512-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm8 = ymm18[2,3,2,3,6,7,6,7]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,2,6,7,6,6]
-; AVX512-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8],ymm4[9],ymm3[10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15]
+; AVX512-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm8[1],ymm3[2],ymm8[3],ymm3[4,5],ymm8[6],ymm3[7,8],ymm8[9],ymm3[10],ymm8[11],ymm3[12,13],ymm8[14],ymm3[15]
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,2]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm4
 ; AVX512-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25,18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25]
 ; AVX512-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512-NEXT:    vpshufb %ymm3, %ymm0, %ymm4
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm8 = ymm6[1,1,1,2,5,5,5,6]
-; AVX512-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm8[1],ymm4[2,3],ymm8[4],ymm4[5],ymm8[6],ymm4[7,8],ymm8[9],ymm4[10,11],ymm8[12],ymm4[13],ymm8[14],ymm4[15]
-; AVX512-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512-NEXT:    vpshufb %ymm3, %ymm0, %ymm8
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm12 = ymm6[1,1,1,2,5,5,5,6]
+; AVX512-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm12[1],ymm8[2,3],ymm12[4],ymm8[5],ymm12[6],ymm8[7,8],ymm12[9],ymm8[10,11],ymm12[12],ymm8[13],ymm12[14],ymm8[15]
+; AVX512-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[3,2,3,3,7,6,7,7]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
 ; AVX512-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm6[1],ymm0[2],ymm6[3,4],ymm0[5,6,7,8],ymm6[9],ymm0[10],ymm6[11,12],ymm0[13,14,15]
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,3,2]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm0
-; AVX512-NEXT:    vpternlogq $226, %zmm2, %zmm7, %zmm0
-; AVX512-NEXT:    vpbroadcastq 48(%r8), %ymm2
-; AVX512-NEXT:    vpbroadcastq 56(%r8), %ymm4
-; AVX512-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
-; AVX512-NEXT:    vpternlogd $184, %zmm0, %zmm4, %zmm2
+; AVX512-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512-NEXT:    vpternlogq $226, %zmm4, %zmm7, %zmm0
+; AVX512-NEXT:    vpbroadcastq 48(%r8), %ymm4
+; AVX512-NEXT:    vpbroadcastq 56(%r8), %ymm6
+; AVX512-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
+; AVX512-NEXT:    vpternlogd $184, %zmm0, %zmm6, %zmm4
 ; AVX512-NEXT:    vmovdqa64 %ymm22, %ymm0
 ; AVX512-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
-; AVX512-NEXT:    vpermq {{.*#+}} ymm6 = ymm20[1,1,2,2]
-; AVX512-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0],ymm0[1],ymm6[2,3],ymm0[4],ymm6[5],ymm0[6],ymm6[7,8],ymm0[9],ymm6[10,11],ymm0[12],ymm6[13],ymm0[14],ymm6[15]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm6 = ymm20[0,1,2,1,4,5,6,5]
+; AVX512-NEXT:    vpermq {{.*#+}} ymm7 = ymm20[1,1,2,2]
+; AVX512-NEXT:    vpblendw {{.*#+}} ymm0 = ymm7[0],ymm0[1],ymm7[2,3],ymm0[4],ymm7[5],ymm0[6],ymm7[7,8],ymm0[9],ymm7[10,11],ymm0[12],ymm7[13],ymm0[14],ymm7[15]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm7 = ymm20[0,1,2,1,4,5,6,5]
 ; AVX512-NEXT:    vprolq $16, %ymm5, %ymm5
-; AVX512-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm6[2],ymm5[3],ymm6[4],ymm5[5,6],ymm6[7],ymm5[8,9],ymm6[10],ymm5[11],ymm6[12],ymm5[13,14],ymm6[15]
+; AVX512-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm7[2],ymm5[3],ymm7[4],ymm5[5,6],ymm7[7],ymm5[8,9],ymm7[10],ymm5[11],ymm7[12],ymm5[13,14],ymm7[15]
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
 ; AVX512-NEXT:    vmovdqa64 %ymm23, %ymm5
 ; AVX512-NEXT:    vpshufb %ymm5, %ymm1, %ymm5
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm6 = ymm17[3,0,3,0,7,4,7,4]
-; AVX512-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2],ymm5[3],ymm6[4,5],ymm5[6],ymm6[7,8],ymm5[9],ymm6[10],ymm5[11],ymm6[12,13],ymm5[14],ymm6[15]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm7 = ymm17[3,0,3,0,7,4,7,4]
+; AVX512-NEXT:    vpblendw {{.*#+}} ymm5 = ymm7[0],ymm5[1],ymm7[2],ymm5[3],ymm7[4,5],ymm5[6],ymm7[7,8],ymm5[9],ymm7[10],ymm5[11],ymm7[12,13],ymm5[14],ymm7[15]
 ; AVX512-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm3 = ymm17[1,1,1,2,5,5,5,6]
 ; AVX512-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5],ymm3[6],ymm1[7,8],ymm3[9],ymm1[10,11],ymm3[12],ymm1[13],ymm3[14],ymm1[15]
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
 ; AVX512-NEXT:    vpternlogq $226, %zmm0, %zmm16, %zmm1
-; AVX512-NEXT:    vpbroadcastq 16(%r8), %ymm0
-; AVX512-NEXT:    vpandn %ymm0, %ymm4, %ymm0
+; AVX512-NEXT:    vpandnq 16(%r8){1to4}, %ymm6, %ymm0
 ; AVX512-NEXT:    vpshufb %ymm11, %ymm9, %ymm3
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
 ; AVX512-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
 ; AVX512-NEXT:    vmovdqa64 %zmm0, 64(%r9)
-; AVX512-NEXT:    vmovdqa64 %zmm2, 256(%r9)
-; AVX512-NEXT:    vmovdqa64 %zmm12, (%r9)
+; AVX512-NEXT:    vmovdqa64 %zmm4, 256(%r9)
+; AVX512-NEXT:    vmovdqa64 %zmm2, (%r9)
 ; AVX512-NEXT:    vmovdqa64 %zmm10, 192(%r9)
 ; AVX512-NEXT:    vmovdqa64 %zmm19, 128(%r9)
 ; AVX512-NEXT:    vzeroupper
@@ -3505,7 +3500,7 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FCP-NEXT:    vmovdqa 32(%rsi), %ymm5
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm1 = [u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u]
 ; AVX512-FCP-NEXT:    vpshufb %ymm1, %ymm5, %ymm0
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm1, %ymm17
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm1, %ymm19
 ; AVX512-FCP-NEXT:    vmovdqa 32(%rdi), %ymm7
 ; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm7[1,1,2,2]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
@@ -3518,24 +3513,24 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
 ; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512-FCP-NEXT:    vmovdqa (%rcx), %ymm1
+; AVX512-FCP-NEXT:    vmovdqa (%rcx), %ymm4
 ; AVX512-FCP-NEXT:    vmovdqa 32(%rcx), %ymm8
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm3 = [u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u]
-; AVX512-FCP-NEXT:    vpshufb %ymm3, %ymm8, %ymm2
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm3, %ymm24
-; AVX512-FCP-NEXT:    vmovdqa64 (%rdx), %ymm18
+; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm2 = [u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u]
+; AVX512-FCP-NEXT:    vpshufb %ymm2, %ymm8, %ymm1
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm2, %ymm24
+; AVX512-FCP-NEXT:    vmovdqa64 (%rdx), %ymm17
 ; AVX512-FCP-NEXT:    vmovdqa 32(%rdx), %ymm9
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm4 = ymm9[3,0,3,0,7,4,7,4]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4,5],ymm2[6],ymm4[7,8],ymm2[9],ymm4[10],ymm2[11],ymm4[12,13],ymm2[14],ymm4[15]
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm2 = ymm9[3,0,3,0,7,4,7,4]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
 ; AVX512-FCP-NEXT:    vmovdqa 32(%rcx), %xmm11
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm4 = [10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9]
-; AVX512-FCP-NEXT:    vpshufb %xmm4, %xmm11, %xmm13
+; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm15 = [10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9]
+; AVX512-FCP-NEXT:    vpshufb %xmm15, %xmm11, %xmm2
 ; AVX512-FCP-NEXT:    vmovdqa 32(%rdx), %xmm6
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} xmm15 = xmm6[1,2,2,2]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0],xmm15[1],xmm13[2],xmm15[3],xmm13[4,5],xmm15[6],xmm13[7]
-; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,0]
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm13, %zmm15
-; AVX512-FCP-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm15
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} xmm13 = xmm6[1,2,2,2]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm13[1],xmm2[2],xmm13[3],xmm2[4,5],xmm13[6],xmm2[7]
+; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,0]
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512-FCP-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
 ; AVX512-FCP-NEXT:    vmovdqa (%r8), %ymm0
 ; AVX512-FCP-NEXT:    vmovdqa 32(%r8), %ymm2
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
@@ -3544,119 +3539,118 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,1,1]
 ; AVX512-FCP-NEXT:    vpandnq %ymm2, %ymm16, %ymm2
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm19
-; AVX512-FCP-NEXT:    vpternlogq $248, %zmm16, %zmm15, %zmm19
-; AVX512-FCP-NEXT:    vmovdqa (%rcx), %xmm3
-; AVX512-FCP-NEXT:    vpshufb %xmm4, %xmm3, %xmm4
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm18
+; AVX512-FCP-NEXT:    vpternlogq $248, %zmm16, %zmm1, %zmm18
+; AVX512-FCP-NEXT:    vmovdqa (%rcx), %xmm1
+; AVX512-FCP-NEXT:    vpshufb %xmm15, %xmm1, %xmm3
 ; AVX512-FCP-NEXT:    vmovdqa (%rdx), %xmm15
 ; AVX512-FCP-NEXT:    vpshufd {{.*#+}} xmm14 = xmm15[1,2,2,2]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm14[1],xmm4[2],xmm14[3],xmm4[4,5],xmm14[6],xmm4[7]
-; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm14[1],xmm3[2],xmm14[3],xmm3[4,5],xmm14[6],xmm3[7]
+; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3]
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm2 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512-FCP-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
+; AVX512-FCP-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
 ; AVX512-FCP-NEXT:    vmovdqa64 %xmm2, %xmm21
 ; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [0,1,0,1,8,9,8,8]
-; AVX512-FCP-NEXT:    vpermi2q %zmm4, %zmm3, %zmm14
-; AVX512-FCP-NEXT:    vpshufb %xmm12, %xmm10, %xmm3
-; AVX512-FCP-NEXT:    vpbroadcastq 8(%rdi), %xmm4
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5],xmm4[6],xmm3[7]
-; AVX512-FCP-NEXT:    vmovdqa (%rdi), %xmm4
+; AVX512-FCP-NEXT:    vpermi2q %zmm3, %zmm1, %zmm14
+; AVX512-FCP-NEXT:    vpshufb %xmm12, %xmm10, %xmm1
+; AVX512-FCP-NEXT:    vpbroadcastq 8(%rdi), %xmm3
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5],xmm3[6],xmm1[7]
+; AVX512-FCP-NEXT:    vmovdqa (%rdi), %xmm3
 ; AVX512-FCP-NEXT:    vmovdqa64 32(%rdi), %xmm22
-; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3]
+; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1],xmm3[2],xmm10[2],xmm3[3],xmm10[3]
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
-; AVX512-FCP-NEXT:    vpshufb %xmm2, %xmm4, %xmm4
+; AVX512-FCP-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
 ; AVX512-FCP-NEXT:    vmovdqa64 %xmm2, %xmm23
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
-; AVX512-FCP-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[0,1,0,1,4,5,4,5]
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
 ; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512-FCP-NEXT:    vpternlogq $226, %zmm14, %zmm16, %zmm3
-; AVX512-FCP-NEXT:    vpbroadcastq (%r8), %ymm4
+; AVX512-FCP-NEXT:    vpternlogq $226, %zmm14, %zmm16, %zmm1
+; AVX512-FCP-NEXT:    vpbroadcastq (%r8), %ymm3
 ; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm10 = ymm0[0,1,1,1]
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm10, %zmm4, %zmm10
-; AVX512-FCP-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm10
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm10, %zmm3, %zmm10
+; AVX512-FCP-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm10
 ; AVX512-FCP-NEXT:    vmovdqa (%rsi), %ymm15
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm17, %ymm2
-; AVX512-FCP-NEXT:    vpshufb %ymm2, %ymm15, %ymm3
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm19, %ymm1
+; AVX512-FCP-NEXT:    vpshufb %ymm1, %ymm15, %ymm1
 ; AVX512-FCP-NEXT:    vmovdqa (%rdi), %ymm14
-; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm4 = ymm14[1,1,2,2]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5],ymm3[6],ymm4[7,8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13],ymm3[14],ymm4[15]
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm4 = ymm14[0,1,2,1,4,5,6,5]
+; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm3 = ymm14[1,1,2,2]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5],ymm1[6],ymm3[7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13],ymm1[14],ymm3[15]
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[0,1,2,1,4,5,6,5]
 ; AVX512-FCP-NEXT:    vprolq $16, %ymm15, %ymm13
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm4 = ymm13[0,1],ymm4[2],ymm13[3],ymm4[4],ymm13[5,6],ymm4[7],ymm13[8,9],ymm4[10],ymm13[11],ymm4[12],ymm13[13,14],ymm4[15]
-; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm24, %ymm2
-; AVX512-FCP-NEXT:    vpshufb %ymm2, %ymm1, %ymm4
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm13 = ymm18[3,0,3,0,7,4,7,4]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm4[1],ymm13[2],ymm4[3],ymm13[4,5],ymm4[6],ymm13[7,8],ymm4[9],ymm13[10],ymm4[11],ymm13[12,13],ymm4[14],ymm13[15]
-; AVX512-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25,18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25]
-; AVX512-FCP-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX512-FCP-NEXT:    vpshufb %ymm4, %ymm1, %ymm2
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm12 = ymm18[1,1,1,2,5,5,5,6]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm3 = ymm13[0,1],ymm3[2],ymm13[3],ymm3[4],ymm13[5,6],ymm3[7],ymm13[8,9],ymm3[10],ymm13[11],ymm3[12],ymm13[13,14],ymm3[15]
+; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm3
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm24, %ymm1
+; AVX512-FCP-NEXT:    vpshufb %ymm1, %ymm4, %ymm1
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm13 = ymm17[3,0,3,0,7,4,7,4]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm1[1],ymm13[2],ymm1[3],ymm13[4,5],ymm1[6],ymm13[7,8],ymm1[9],ymm13[10],ymm1[11],ymm13[12,13],ymm1[14],ymm13[15]
+; AVX512-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25,18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25]
+; AVX512-FCP-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512-FCP-NEXT:    vpshufb %ymm1, %ymm4, %ymm2
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm12 = ymm17[1,1,1,2,5,5,5,6]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm12[1],ymm2[2,3],ymm12[4],ymm2[5],ymm12[6],ymm2[7,8],ymm12[9],ymm2[10,11],ymm12[12],ymm2[13],ymm12[14],ymm2[15]
 ; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm13, %zmm2
-; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
-; AVX512-FCP-NEXT:    vpternlogq $226, %zmm3, %zmm17, %zmm2
-; AVX512-FCP-NEXT:    vpbroadcastq 16(%r8), %ymm3
-; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
-; AVX512-FCP-NEXT:    vpandn %ymm3, %ymm12, %ymm3
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm25, %ymm13
-; AVX512-FCP-NEXT:    vpshufb %ymm13, %ymm0, %ymm0
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
+; AVX512-FCP-NEXT:    vpternlogq $226, %zmm3, %zmm19, %zmm2
+; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
+; AVX512-FCP-NEXT:    vpandnq 16(%r8){1to4}, %ymm3, %ymm13
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm25, %ymm12
+; AVX512-FCP-NEXT:    vpshufb %ymm12, %ymm0, %ymm0
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm0
 ; AVX512-FCP-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
 ; AVX512-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29]
 ; AVX512-FCP-NEXT:    # ymm13 = mem[0,1,0,1]
 ; AVX512-FCP-NEXT:    vpshufb %ymm13, %ymm5, %ymm2
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm3 = ymm7[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7,8],ymm3[9],ymm2[10],ymm3[11],ymm2[12,13],ymm3[14],ymm2[15]
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm3 = ymm7[0,1,2,1,4,5,6,5]
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm12 = ymm7[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm12[1],ymm2[2],ymm12[3],ymm2[4,5],ymm12[6],ymm2[7,8],ymm12[9],ymm2[10],ymm12[11],ymm2[12,13],ymm12[14],ymm2[15]
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[0,1,2,1,4,5,6,5]
 ; AVX512-FCP-NEXT:    vprolq $16, %ymm5, %ymm5
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0,1],ymm3[2],ymm5[3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8,9],ymm3[10],ymm5[11],ymm3[12],ymm5[13,14],ymm3[15]
-; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [2,3,2,3,10,11,10,10]
-; AVX512-FCP-NEXT:    vpermi2q %zmm2, %zmm3, %zmm5
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm7[2],ymm5[3],ymm7[4],ymm5[5,6],ymm7[7],ymm5[8,9],ymm7[10],ymm5[11],ymm7[12],ymm5[13,14],ymm7[15]
+; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [2,3,2,3,10,11,10,10]
+; AVX512-FCP-NEXT:    vpermi2q %zmm2, %zmm5, %zmm7
 ; AVX512-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31]
-; AVX512-FCP-NEXT:    vpshufb %ymm2, %ymm8, %ymm3
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm7 = ymm9[3,2,3,3,7,6,7,7]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm7[1],ymm3[2],ymm7[3,4],ymm3[5,6,7,8],ymm7[9],ymm3[10],ymm7[11,12],ymm3[13,14,15]
-; AVX512-FCP-NEXT:    vpshufb %ymm4, %ymm8, %ymm4
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm7 = ymm9[1,1,1,2,5,5,5,6]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm7[1],ymm4[2,3],ymm7[4],ymm4[5],ymm7[6],ymm4[7,8],ymm7[9],ymm4[10,11],ymm7[12],ymm4[13],ymm7[14],ymm4[15]
-; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [2,3,2,3,10,10,11,10]
-; AVX512-FCP-NEXT:    vpermi2q %zmm3, %zmm4, %zmm7
-; AVX512-FCP-NEXT:    vpternlogq $226, %zmm5, %zmm16, %zmm7
-; AVX512-FCP-NEXT:    vpbroadcastq 48(%r8), %ymm3
-; AVX512-FCP-NEXT:    vpbroadcastq 56(%r8), %ymm4
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512-FCP-NEXT:    vpternlogd $184, %zmm7, %zmm12, %zmm3
-; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm11[0],xmm6[0],xmm11[1],xmm6[1],xmm11[2],xmm6[2],xmm11[3],xmm6[3]
+; AVX512-FCP-NEXT:    vpshufb %ymm2, %ymm8, %ymm5
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm12 = ymm9[3,2,3,3,7,6,7,7]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2],ymm12[3,4],ymm5[5,6,7,8],ymm12[9],ymm5[10],ymm12[11,12],ymm5[13,14,15]
+; AVX512-FCP-NEXT:    vpshufb %ymm1, %ymm8, %ymm1
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm8 = ymm9[1,1,1,2,5,5,5,6]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3],ymm8[4],ymm1[5],ymm8[6],ymm1[7,8],ymm8[9],ymm1[10,11],ymm8[12],ymm1[13],ymm8[14],ymm1[15]
+; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [2,3,2,3,10,10,11,10]
+; AVX512-FCP-NEXT:    vpermi2q %zmm5, %zmm1, %zmm8
+; AVX512-FCP-NEXT:    vpternlogq $226, %zmm7, %zmm16, %zmm8
+; AVX512-FCP-NEXT:    vpbroadcastq 48(%r8), %ymm1
+; AVX512-FCP-NEXT:    vpbroadcastq 56(%r8), %ymm5
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm1
+; AVX512-FCP-NEXT:    vpternlogd $184, %zmm8, %zmm3, %zmm1
+; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm11[0],xmm6[0],xmm11[1],xmm6[1],xmm11[2],xmm6[2],xmm11[3],xmm6[3]
 ; AVX512-FCP-NEXT:    vmovdqa64 %xmm21, %xmm5
-; AVX512-FCP-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
-; AVX512-FCP-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[3,2,3,3,7,6,7,7]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3,4],ymm1[5,6,7,8],ymm2[9],ymm1[10],ymm2[11,12],ymm1[13,14,15]
-; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [2,2,3,2,8,9,8,9]
-; AVX512-FCP-NEXT:    vpermi2q %zmm4, %zmm1, %zmm2
-; AVX512-FCP-NEXT:    vmovdqa64 %xmm20, %xmm1
-; AVX512-FCP-NEXT:    vmovdqa64 %xmm22, %xmm4
-; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; AVX512-FCP-NEXT:    vmovdqa64 %xmm23, %xmm4
-; AVX512-FCP-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
-; AVX512-FCP-NEXT:    vpshufb %ymm13, %ymm15, %ymm4
+; AVX512-FCP-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
+; AVX512-FCP-NEXT:    vpshufb %ymm2, %ymm4, %ymm2
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm4 = ymm17[3,2,3,3,7,6,7,7]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2],ymm4[3,4],ymm2[5,6,7,8],ymm4[9],ymm2[10],ymm4[11,12],ymm2[13,14,15]
+; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [2,2,3,2,8,9,8,9]
+; AVX512-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm4
+; AVX512-FCP-NEXT:    vmovdqa64 %xmm20, %xmm2
+; AVX512-FCP-NEXT:    vmovdqa64 %xmm22, %xmm3
+; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512-FCP-NEXT:    vmovdqa64 %xmm23, %xmm3
+; AVX512-FCP-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX512-FCP-NEXT:    vpshufb %ymm13, %ymm15, %ymm3
 ; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm5 = ymm14[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7,8],ymm5[9],ymm4[10],ymm5[11],ymm4[12,13],ymm5[14],ymm4[15]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2],ymm5[3],ymm3[4,5],ymm5[6],ymm3[7,8],ymm5[9],ymm3[10],ymm5[11],ymm3[12,13],ymm5[14],ymm3[15]
 ; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [2,3,2,2,8,9,8,9]
-; AVX512-FCP-NEXT:    vpermi2q %zmm1, %zmm4, %zmm5
-; AVX512-FCP-NEXT:    vpternlogq $226, %zmm2, %zmm17, %zmm5
-; AVX512-FCP-NEXT:    vpbroadcastq 24(%r8), %ymm1
-; AVX512-FCP-NEXT:    vpbroadcastq 32(%r8), %ymm2
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512-FCP-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm1
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, 128(%r9)
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm3, 256(%r9)
+; AVX512-FCP-NEXT:    vpermi2q %zmm2, %zmm3, %zmm5
+; AVX512-FCP-NEXT:    vpternlogq $226, %zmm4, %zmm19, %zmm5
+; AVX512-FCP-NEXT:    vpbroadcastq 24(%r8), %ymm2
+; AVX512-FCP-NEXT:    vpbroadcastq 32(%r8), %ymm3
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512-FCP-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm2
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, 128(%r9)
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, 256(%r9)
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, 64(%r9)
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm10, (%r9)
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm19, 192(%r9)
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm18, 192(%r9)
 ; AVX512-FCP-NEXT:    vzeroupper
 ; AVX512-FCP-NEXT:    retq
 ;
@@ -3754,63 +3748,62 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-NEXT:    vpternlogq $226, %zmm2, %zmm7, %zmm4
 ; AVX512DQ-NEXT:    vpbroadcastq (%r8), %ymm2
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm8 = ymm9[0,1,1,1]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm8, %zmm2, %zmm12
-; AVX512DQ-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm12
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[0,1,2,1,4,5,6,5]
-; AVX512DQ-NEXT:    vprolq $16, %ymm3, %ymm4
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0,1],ymm2[2],ymm4[3],ymm2[4],ymm4[5,6],ymm2[7],ymm4[8,9],ymm2[10],ymm4[11],ymm2[12],ymm4[13,14],ymm2[15]
-; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm4 = ymm18[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm8, %zmm2, %zmm2
+; AVX512DQ-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm2
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm4 = ymm18[0,1,2,1,4,5,6,5]
+; AVX512DQ-NEXT:    vprolq $16, %ymm3, %ymm8
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm4 = ymm8[0,1],ymm4[2],ymm8[3],ymm4[4],ymm8[5,6],ymm4[7],ymm8[8,9],ymm4[10],ymm8[11],ymm4[12],ymm8[13,14],ymm4[15]
+; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm8 = ymm18[2,3,2,3,6,7,6,7]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,2,6,7,6,6]
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8],ymm4[9],ymm3[10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15]
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm8[1],ymm3[2],ymm8[3],ymm3[4,5],ymm8[6],ymm3[7,8],ymm8[9],ymm3[10],ymm8[11],ymm3[12,13],ymm8[14],ymm3[15]
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,2]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm4
 ; AVX512DQ-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25,18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25]
 ; AVX512DQ-NEXT:    # ymm3 = mem[0,1,0,1]
-; AVX512DQ-NEXT:    vpshufb %ymm3, %ymm0, %ymm4
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm8 = ymm6[1,1,1,2,5,5,5,6]
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm8[1],ymm4[2,3],ymm8[4],ymm4[5],ymm8[6],ymm4[7,8],ymm8[9],ymm4[10,11],ymm8[12],ymm4[13],ymm8[14],ymm4[15]
-; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512DQ-NEXT:    vpshufb %ymm3, %ymm0, %ymm8
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm12 = ymm6[1,1,1,2,5,5,5,6]
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm12[1],ymm8[2,3],ymm12[4],ymm8[5],ymm12[6],ymm8[7,8],ymm12[9],ymm8[10,11],ymm12[12],ymm8[13],ymm12[14],ymm8[15]
+; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[2,3,2,3]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[3,2,3,3,7,6,7,7]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
 ; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm6[1],ymm0[2],ymm6[3,4],ymm0[5,6,7,8],ymm6[9],ymm0[10],ymm6[11,12],ymm0[13,14,15]
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,3,2]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm0
-; AVX512DQ-NEXT:    vpternlogq $226, %zmm2, %zmm7, %zmm0
-; AVX512DQ-NEXT:    vpbroadcastq 48(%r8), %ymm2
-; AVX512DQ-NEXT:    vpbroadcastq 56(%r8), %ymm4
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
-; AVX512DQ-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
-; AVX512DQ-NEXT:    vpternlogd $184, %zmm0, %zmm4, %zmm2
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512DQ-NEXT:    vpternlogq $226, %zmm4, %zmm7, %zmm0
+; AVX512DQ-NEXT:    vpbroadcastq 48(%r8), %ymm4
+; AVX512DQ-NEXT:    vpbroadcastq 56(%r8), %ymm6
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
+; AVX512DQ-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
+; AVX512DQ-NEXT:    vpternlogd $184, %zmm0, %zmm6, %zmm4
 ; AVX512DQ-NEXT:    vmovdqa64 %ymm22, %ymm0
 ; AVX512DQ-NEXT:    vpshufb %ymm0, %ymm5, %ymm0
-; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm6 = ymm20[1,1,2,2]
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm0 = ymm6[0],ymm0[1],ymm6[2,3],ymm0[4],ymm6[5],ymm0[6],ymm6[7,8],ymm0[9],ymm6[10,11],ymm0[12],ymm6[13],ymm0[14],ymm6[15]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm6 = ymm20[0,1,2,1,4,5,6,5]
+; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm7 = ymm20[1,1,2,2]
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm0 = ymm7[0],ymm0[1],ymm7[2,3],ymm0[4],ymm7[5],ymm0[6],ymm7[7,8],ymm0[9],ymm7[10,11],ymm0[12],ymm7[13],ymm0[14],ymm7[15]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm7 = ymm20[0,1,2,1,4,5,6,5]
 ; AVX512DQ-NEXT:    vprolq $16, %ymm5, %ymm5
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm6[2],ymm5[3],ymm6[4],ymm5[5,6],ymm6[7],ymm5[8,9],ymm6[10],ymm5[11],ymm6[12],ymm5[13,14],ymm6[15]
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm7[2],ymm5[3],ymm7[4],ymm5[5,6],ymm7[7],ymm5[8,9],ymm7[10],ymm5[11],ymm7[12],ymm5[13,14],ymm7[15]
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    vmovdqa64 %ymm23, %ymm5
 ; AVX512DQ-NEXT:    vpshufb %ymm5, %ymm1, %ymm5
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm6 = ymm17[3,0,3,0,7,4,7,4]
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2],ymm5[3],ymm6[4,5],ymm5[6],ymm6[7,8],ymm5[9],ymm6[10],ymm5[11],ymm6[12,13],ymm5[14],ymm6[15]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm7 = ymm17[3,0,3,0,7,4,7,4]
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm5 = ymm7[0],ymm5[1],ymm7[2],ymm5[3],ymm7[4,5],ymm5[6],ymm7[7,8],ymm5[9],ymm7[10],ymm5[11],ymm7[12,13],ymm5[14],ymm7[15]
 ; AVX512DQ-NEXT:    vpshufb %ymm3, %ymm1, %ymm1
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm3 = ymm17[1,1,1,2,5,5,5,6]
 ; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5],ymm3[6],ymm1[7,8],ymm3[9],ymm1[10,11],ymm3[12],ymm1[13],ymm3[14],ymm1[15]
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm1
 ; AVX512DQ-NEXT:    vpternlogq $226, %zmm0, %zmm16, %zmm1
-; AVX512DQ-NEXT:    vpbroadcastq 16(%r8), %ymm0
-; AVX512DQ-NEXT:    vpandn %ymm0, %ymm4, %ymm0
+; AVX512DQ-NEXT:    vpandnq 16(%r8){1to4}, %ymm6, %ymm0
 ; AVX512DQ-NEXT:    vpshufb %ymm11, %ymm9, %ymm3
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm0, %zmm3, %zmm0
 ; AVX512DQ-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
 ; AVX512DQ-NEXT:    vmovdqa64 %zmm0, 64(%r9)
-; AVX512DQ-NEXT:    vmovdqa64 %zmm2, 256(%r9)
-; AVX512DQ-NEXT:    vmovdqa64 %zmm12, (%r9)
+; AVX512DQ-NEXT:    vmovdqa64 %zmm4, 256(%r9)
+; AVX512DQ-NEXT:    vmovdqa64 %zmm2, (%r9)
 ; AVX512DQ-NEXT:    vmovdqa64 %zmm10, 192(%r9)
 ; AVX512DQ-NEXT:    vmovdqa64 %zmm19, 128(%r9)
 ; AVX512DQ-NEXT:    vzeroupper
@@ -3821,7 +3814,7 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rsi), %ymm5
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm1 = [u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm1, %ymm5, %ymm0
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm1, %ymm17
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm1, %ymm19
 ; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rdi), %ymm7
 ; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm7[1,1,2,2]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
@@ -3834,24 +3827,24 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
 ; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512DQ-FCP-NEXT:    vmovdqa (%rcx), %ymm1
+; AVX512DQ-FCP-NEXT:    vmovdqa (%rcx), %ymm4
 ; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rcx), %ymm8
-; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm3 = [u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u]
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm3, %ymm8, %ymm2
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm3, %ymm24
-; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rdx), %ymm18
+; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm2 = [u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm2, %ymm8, %ymm1
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm2, %ymm24
+; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rdx), %ymm17
 ; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rdx), %ymm9
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm4 = ymm9[3,0,3,0,7,4,7,4]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4,5],ymm2[6],ymm4[7,8],ymm2[9],ymm4[10],ymm2[11],ymm4[12,13],ymm2[14],ymm4[15]
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm2 = ymm9[3,0,3,0,7,4,7,4]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4,5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10],ymm1[11],ymm2[12,13],ymm1[14],ymm2[15]
 ; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rcx), %xmm11
-; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm4 = [10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm4, %xmm11, %xmm13
+; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm15 = [10,11,u,u,6,7,u,u,8,9,8,9,u,u,8,9]
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm15, %xmm11, %xmm2
 ; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rdx), %xmm6
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} xmm15 = xmm6[1,2,2,2]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm13 = xmm13[0],xmm15[1],xmm13[2],xmm15[3],xmm13[4,5],xmm15[6],xmm13[7]
-; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[0,1,0,0]
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm13, %zmm15
-; AVX512DQ-FCP-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm15
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} xmm13 = xmm6[1,2,2,2]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm13[1],xmm2[2],xmm13[3],xmm2[4,5],xmm13[6],xmm2[7]
+; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,0,0]
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512DQ-FCP-NEXT:    vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
 ; AVX512DQ-FCP-NEXT:    vmovdqa (%r8), %ymm0
 ; AVX512DQ-FCP-NEXT:    vmovdqa 32(%r8), %ymm2
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
@@ -3860,119 +3853,118 @@ define void @store_i16_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,1,1]
 ; AVX512DQ-FCP-NEXT:    vpandnq %ymm2, %ymm16, %ymm2
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm19
-; AVX512DQ-FCP-NEXT:    vpternlogq $248, %zmm16, %zmm15, %zmm19
-; AVX512DQ-FCP-NEXT:    vmovdqa (%rcx), %xmm3
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm4, %xmm3, %xmm4
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm18
+; AVX512DQ-FCP-NEXT:    vpternlogq $248, %zmm16, %zmm1, %zmm18
+; AVX512DQ-FCP-NEXT:    vmovdqa (%rcx), %xmm1
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm15, %xmm1, %xmm3
 ; AVX512DQ-FCP-NEXT:    vmovdqa (%rdx), %xmm15
 ; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} xmm14 = xmm15[1,2,2,2]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm14[1],xmm4[2],xmm14[3],xmm4[4,5],xmm14[6],xmm4[7]
-; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm14[1],xmm3[2],xmm14[3],xmm3[4,5],xmm14[6],xmm3[7]
+; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3]
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm2 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm2, %xmm21
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm14 = [0,1,0,1,8,9,8,8]
-; AVX512DQ-FCP-NEXT:    vpermi2q %zmm4, %zmm3, %zmm14
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm12, %xmm10, %xmm3
-; AVX512DQ-FCP-NEXT:    vpbroadcastq 8(%rdi), %xmm4
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5],xmm4[6],xmm3[7]
-; AVX512DQ-FCP-NEXT:    vmovdqa (%rdi), %xmm4
+; AVX512DQ-FCP-NEXT:    vpermi2q %zmm3, %zmm1, %zmm14
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm12, %xmm10, %xmm1
+; AVX512DQ-FCP-NEXT:    vpbroadcastq 8(%rdi), %xmm3
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm3[1],xmm1[2,3],xmm3[4],xmm1[5],xmm3[6],xmm1[7]
+; AVX512DQ-FCP-NEXT:    vmovdqa (%rdi), %xmm3
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 32(%rdi), %xmm22
-; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3]
+; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1],xmm3[2],xmm10[2],xmm3[3],xmm10[3]
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm2, %xmm4, %xmm4
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm2, %xmm3, %xmm3
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm2, %xmm23
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
-; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} zmm3 = zmm3[0,1,0,1,4,5,4,5]
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
+; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} zmm1 = zmm1[0,1,0,1,4,5,4,5]
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm14, %zmm16, %zmm3
-; AVX512DQ-FCP-NEXT:    vpbroadcastq (%r8), %ymm4
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm14, %zmm16, %zmm1
+; AVX512DQ-FCP-NEXT:    vpbroadcastq (%r8), %ymm3
 ; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm10 = ymm0[0,1,1,1]
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm10, %zmm4, %zmm10
-; AVX512DQ-FCP-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm10
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm10, %zmm3, %zmm10
+; AVX512DQ-FCP-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm10
 ; AVX512DQ-FCP-NEXT:    vmovdqa (%rsi), %ymm15
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm17, %ymm2
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm2, %ymm15, %ymm3
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm19, %ymm1
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm1, %ymm15, %ymm1
 ; AVX512DQ-FCP-NEXT:    vmovdqa (%rdi), %ymm14
-; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm4 = ymm14[1,1,2,2]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5],ymm3[6],ymm4[7,8],ymm3[9],ymm4[10,11],ymm3[12],ymm4[13],ymm3[14],ymm4[15]
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm4 = ymm14[0,1,2,1,4,5,6,5]
+; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm3 = ymm14[1,1,2,2]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5],ymm1[6],ymm3[7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13],ymm1[14],ymm3[15]
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm3 = ymm14[0,1,2,1,4,5,6,5]
 ; AVX512DQ-FCP-NEXT:    vprolq $16, %ymm15, %ymm13
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm4 = ymm13[0,1],ymm4[2],ymm13[3],ymm4[4],ymm13[5,6],ymm4[7],ymm13[8,9],ymm4[10],ymm13[11],ymm4[12],ymm13[13,14],ymm4[15]
-; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm24, %ymm2
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm2, %ymm1, %ymm4
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm13 = ymm18[3,0,3,0,7,4,7,4]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm4[1],ymm13[2],ymm4[3],ymm13[4,5],ymm4[6],ymm13[7,8],ymm4[9],ymm13[10],ymm4[11],ymm13[12,13],ymm4[14],ymm13[15]
-; AVX512DQ-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25,18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25]
-; AVX512DQ-FCP-NEXT:    # ymm4 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm4, %ymm1, %ymm2
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm12 = ymm18[1,1,1,2,5,5,5,6]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm3 = ymm13[0,1],ymm3[2],ymm13[3],ymm3[4],ymm13[5,6],ymm3[7],ymm13[8,9],ymm3[10],ymm13[11],ymm3[12],ymm13[13,14],ymm3[15]
+; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[2,3,2,3]
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm3
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm24, %ymm1
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm1, %ymm4, %ymm1
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm13 = ymm17[3,0,3,0,7,4,7,4]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm1[1],ymm13[2],ymm1[3],ymm13[4,5],ymm1[6],ymm13[7,8],ymm1[9],ymm13[10],ymm1[11],ymm13[12,13],ymm1[14],ymm13[15]
+; AVX512DQ-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25,18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25]
+; AVX512DQ-FCP-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm1, %ymm4, %ymm2
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm12 = ymm17[1,1,1,2,5,5,5,6]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm12[1],ymm2[2,3],ymm12[4],ymm2[5],ymm12[6],ymm2[7,8],ymm12[9],ymm2[10,11],ymm12[12],ymm2[13],ymm12[14],ymm2[15]
 ; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[2,3,2,3]
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm13, %zmm2
-; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm3, %zmm17, %zmm2
-; AVX512DQ-FCP-NEXT:    vpbroadcastq 16(%r8), %ymm3
-; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
-; AVX512DQ-FCP-NEXT:    vpandn %ymm3, %ymm12, %ymm3
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm25, %ymm13
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm13, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm3, %zmm19, %zmm2
+; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
+; AVX512DQ-FCP-NEXT:    vpandnq 16(%r8){1to4}, %ymm3, %ymm13
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm25, %ymm12
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm12, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm13, %zmm0, %zmm0
 ; AVX512DQ-FCP-NEXT:    vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
 ; AVX512DQ-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm13 = [30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29]
 ; AVX512DQ-FCP-NEXT:    # ymm13 = mem[0,1,0,1]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm13, %ymm5, %ymm2
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm3 = ymm7[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4,5],ymm3[6],ymm2[7,8],ymm3[9],ymm2[10],ymm3[11],ymm2[12,13],ymm3[14],ymm2[15]
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm3 = ymm7[0,1,2,1,4,5,6,5]
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm12 = ymm7[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm12[1],ymm2[2],ymm12[3],ymm2[4,5],ymm12[6],ymm2[7,8],ymm12[9],ymm2[10],ymm12[11],ymm2[12,13],ymm12[14],ymm2[15]
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm7 = ymm7[0,1,2,1,4,5,6,5]
 ; AVX512DQ-FCP-NEXT:    vprolq $16, %ymm5, %ymm5
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0,1],ymm3[2],ymm5[3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8,9],ymm3[10],ymm5[11],ymm3[12],ymm5[13,14],ymm3[15]
-; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [2,3,2,3,10,11,10,10]
-; AVX512DQ-FCP-NEXT:    vpermi2q %zmm2, %zmm3, %zmm5
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0,1],ymm7[2],ymm5[3],ymm7[4],ymm5[5,6],ymm7[7],ymm5[8,9],ymm7[10],ymm5[11],ymm7[12],ymm5[13,14],ymm7[15]
+; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [2,3,2,3,10,11,10,10]
+; AVX512DQ-FCP-NEXT:    vpermi2q %zmm2, %zmm5, %zmm7
 ; AVX512DQ-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31]
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm2, %ymm8, %ymm3
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm7 = ymm9[3,2,3,3,7,6,7,7]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm7[1],ymm3[2],ymm7[3,4],ymm3[5,6,7,8],ymm7[9],ymm3[10],ymm7[11,12],ymm3[13,14,15]
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm4, %ymm8, %ymm4
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm7 = ymm9[1,1,1,2,5,5,5,6]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm7[1],ymm4[2,3],ymm7[4],ymm4[5],ymm7[6],ymm4[7,8],ymm7[9],ymm4[10,11],ymm7[12],ymm4[13],ymm7[14],ymm4[15]
-; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [2,3,2,3,10,10,11,10]
-; AVX512DQ-FCP-NEXT:    vpermi2q %zmm3, %zmm4, %zmm7
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm5, %zmm16, %zmm7
-; AVX512DQ-FCP-NEXT:    vpbroadcastq 48(%r8), %ymm3
-; AVX512DQ-FCP-NEXT:    vpbroadcastq 56(%r8), %ymm4
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512DQ-FCP-NEXT:    vpternlogd $184, %zmm7, %zmm12, %zmm3
-; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm11[0],xmm6[0],xmm11[1],xmm6[1],xmm11[2],xmm6[2],xmm11[3],xmm6[3]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm2, %ymm8, %ymm5
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm12 = ymm9[3,2,3,3,7,6,7,7]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2],ymm12[3,4],ymm5[5,6,7,8],ymm12[9],ymm5[10],ymm12[11,12],ymm5[13,14,15]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm1, %ymm8, %ymm1
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm8 = ymm9[1,1,1,2,5,5,5,6]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3],ymm8[4],ymm1[5],ymm8[6],ymm1[7,8],ymm8[9],ymm1[10,11],ymm8[12],ymm1[13],ymm8[14],ymm1[15]
+; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm8 = [2,3,2,3,10,10,11,10]
+; AVX512DQ-FCP-NEXT:    vpermi2q %zmm5, %zmm1, %zmm8
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm7, %zmm16, %zmm8
+; AVX512DQ-FCP-NEXT:    vpbroadcastq 48(%r8), %ymm1
+; AVX512DQ-FCP-NEXT:    vpbroadcastq 56(%r8), %ymm5
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm1
+; AVX512DQ-FCP-NEXT:    vpternlogd $184, %zmm8, %zmm3, %zmm1
+; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm3 = xmm11[0],xmm6[0],xmm11[1],xmm6[1],xmm11[2],xmm6[2],xmm11[3],xmm6[3]
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm21, %xmm5
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[3,2,3,3,7,6,7,7]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3,4],ymm1[5,6,7,8],ymm2[9],ymm1[10],ymm2[11,12],ymm1[13,14,15]
-; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm2 = [2,2,3,2,8,9,8,9]
-; AVX512DQ-FCP-NEXT:    vpermi2q %zmm4, %zmm1, %zmm2
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm20, %xmm1
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm22, %xmm4
-; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm23, %xmm4
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm13, %ymm15, %ymm4
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm2, %ymm4, %ymm2
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm4 = ymm17[3,2,3,3,7,6,7,7]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2],ymm4[3,4],ymm2[5,6,7,8],ymm4[9],ymm2[10],ymm4[11,12],ymm2[13,14,15]
+; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [2,2,3,2,8,9,8,9]
+; AVX512DQ-FCP-NEXT:    vpermi2q %zmm3, %zmm2, %zmm4
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm20, %xmm2
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm22, %xmm3
+; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %xmm23, %xmm3
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm13, %ymm15, %ymm3
 ; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm5 = ymm14[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2],ymm5[3],ymm4[4,5],ymm5[6],ymm4[7,8],ymm5[9],ymm4[10],ymm5[11],ymm4[12,13],ymm5[14],ymm4[15]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2],ymm5[3],ymm3[4,5],ymm5[6],ymm3[7,8],ymm5[9],ymm3[10],ymm5[11],ymm3[12,13],ymm5[14],ymm3[15]
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [2,3,2,2,8,9,8,9]
-; AVX512DQ-FCP-NEXT:    vpermi2q %zmm1, %zmm4, %zmm5
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm2, %zmm17, %zmm5
-; AVX512DQ-FCP-NEXT:    vpbroadcastq 24(%r8), %ymm1
-; AVX512DQ-FCP-NEXT:    vpbroadcastq 32(%r8), %ymm2
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512DQ-FCP-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm1
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, 128(%r9)
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm3, 256(%r9)
+; AVX512DQ-FCP-NEXT:    vpermi2q %zmm2, %zmm3, %zmm5
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm4, %zmm19, %zmm5
+; AVX512DQ-FCP-NEXT:    vpbroadcastq 24(%r8), %ymm2
+; AVX512DQ-FCP-NEXT:    vpbroadcastq 32(%r8), %ymm3
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
+; AVX512DQ-FCP-NEXT:    vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm2
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, 128(%r9)
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, 256(%r9)
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, 64(%r9)
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm10, (%r9)
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm19, 192(%r9)
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm18, 192(%r9)
 ; AVX512DQ-FCP-NEXT:    vzeroupper
 ; AVX512DQ-FCP-NEXT:    retq
 ;
@@ -6773,27 +6765,27 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-NEXT:    vmovdqa {{.*#+}} ymm3 = [u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u]
 ; AVX512-NEXT:    vpshufb %ymm3, %ymm9, %ymm1
 ; AVX512-NEXT:    vmovdqa64 %ymm3, %ymm20
-; AVX512-NEXT:    vmovdqa 96(%rdi), %ymm5
-; AVX512-NEXT:    vpermq {{.*#+}} ymm3 = ymm5[1,1,2,2]
+; AVX512-NEXT:    vmovdqa 96(%rdi), %ymm4
+; AVX512-NEXT:    vpermq {{.*#+}} ymm3 = ymm4[1,1,2,2]
 ; AVX512-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5],ymm1[6],ymm3[7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13],ymm1[14],ymm3[15]
 ; AVX512-NEXT:    vmovdqa 96(%rsi), %xmm1
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm12 = [6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13]
-; AVX512-NEXT:    vpshufb %xmm12, %xmm1, %xmm4
-; AVX512-NEXT:    vpbroadcastq 104(%rdi), %xmm6
-; AVX512-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1],xmm4[2,3],xmm6[4],xmm4[5],xmm6[6],xmm4[7]
-; AVX512-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm5 = [6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13]
+; AVX512-NEXT:    vpshufb %xmm5, %xmm1, %xmm6
+; AVX512-NEXT:    vpbroadcastq 104(%rdi), %xmm8
+; AVX512-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2,3],xmm8[4],xmm6[5],xmm8[6],xmm6[7]
+; AVX512-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm6, %zmm3
 ; AVX512-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512-NEXT:    vmovdqa 96(%r8), %ymm3
-; AVX512-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,ymm3[12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[16,17],zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,ymm3[12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[16,17],zero,zero,zero,zero,zero,zero
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,1,1]
 ; AVX512-NEXT:    vpandn %ymm3, %ymm15, %ymm3
-; AVX512-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512-NEXT:    vinserti64x4 $1, %ymm6, %zmm3, %zmm3
 ; AVX512-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm13 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX512-NEXT:    vpshufb %xmm12, %xmm0, %xmm0
 ; AVX512-NEXT:    vmovdqa64 (%rdx), %ymm16
 ; AVX512-NEXT:    vmovdqa 64(%rdx), %ymm2
 ; AVX512-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -6828,7 +6820,7 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
 ; AVX512-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT:    vmovdqa 64(%rsi), %xmm0
-; AVX512-NEXT:    vpshufb %xmm12, %xmm0, %xmm1
+; AVX512-NEXT:    vpshufb %xmm5, %xmm0, %xmm1
 ; AVX512-NEXT:    vpbroadcastq 72(%rdi), %xmm2
 ; AVX512-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
 ; AVX512-NEXT:    vmovdqa 64(%rdi), %xmm2
@@ -6838,12 +6830,12 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,5,6]
 ; AVX512-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm1
 ; AVX512-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vpshufb %xmm12, %xmm0, %xmm1
+; AVX512-NEXT:    vpshufb %xmm5, %xmm0, %xmm1
 ; AVX512-NEXT:    vpbroadcastq 8(%rdi), %xmm2
 ; AVX512-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
 ; AVX512-NEXT:    vmovdqa (%rdi), %xmm2
 ; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm4 = ymm16[3,2,3,3,7,6,7,7]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm8 = ymm16[3,2,3,3,7,6,7,7]
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
 ; AVX512-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
@@ -6851,11 +6843,11 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-NEXT:    vmovdqa (%rcx), %ymm2
 ; AVX512-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm2[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX512-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2],ymm4[3,4],ymm0[5,6,7,8],ymm4[9],ymm0[10],ymm4[11,12],ymm0[13,14,15]
-; AVX512-NEXT:    vmovdqa 32(%rdx), %xmm4
+; AVX512-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm8[1],ymm0[2],ymm8[3,4],ymm0[5,6,7,8],ymm8[9],ymm0[10],ymm8[11,12],ymm0[13,14,15]
+; AVX512-NEXT:    vmovdqa 32(%rdx), %xmm8
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,3,2]
-; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; AVX512-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3]
+; AVX512-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
 ; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm25 = zmm0[0,1,2,3],zmm1[0,1,0,1]
 ; AVX512-NEXT:    vmovdqa64 (%rdi), %ymm30
 ; AVX512-NEXT:    vmovdqa (%rsi), %ymm10
@@ -6864,9 +6856,9 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm1 = ymm30[2,3,2,3,6,7,6,7]
 ; AVX512-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8],ymm1[9],ymm0[10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,2]
-; AVX512-NEXT:    vmovdqa 32(%rsi), %xmm8
+; AVX512-NEXT:    vmovdqa 32(%rsi), %xmm13
 ; AVX512-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3]
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3]
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,6]
 ; AVX512-NEXT:    vshufi64x2 {{.*#+}} zmm27 = zmm0[0,1,2,3],zmm1[0,1,0,1]
@@ -6876,62 +6868,62 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm14 = ymm19[3,0,3,0,7,4,7,4]
 ; AVX512-NEXT:    vpblendw {{.*#+}} ymm0 = ymm14[0],ymm0[1],ymm14[2],ymm0[3],ymm14[4,5],ymm0[6],ymm14[7,8],ymm0[9],ymm14[10],ymm0[11],ymm14[12,13],ymm0[14],ymm14[15]
 ; AVX512-NEXT:    vpshufb %xmm7, %xmm6, %xmm6
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,2,2,2]
-; AVX512-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1],xmm6[2],xmm4[3],xmm6[4,5],xmm4[6],xmm6[7]
-; AVX512-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,0]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm0
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[1,2,2,2]
+; AVX512-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2],xmm8[3],xmm6[4,5],xmm8[6],xmm6[7]
+; AVX512-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,0]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm0, %zmm6, %zmm0
 ; AVX512-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-NEXT:    vmovdqa 32(%rdi), %ymm4
+; AVX512-NEXT:    vmovdqa 32(%rdi), %ymm6
 ; AVX512-NEXT:    vmovdqa 32(%rsi), %ymm0
-; AVX512-NEXT:    vmovdqa64 %ymm20, %ymm6
-; AVX512-NEXT:    vpshufb %ymm6, %ymm0, %ymm6
-; AVX512-NEXT:    vpermq {{.*#+}} ymm14 = ymm4[1,1,2,2]
-; AVX512-NEXT:    vpblendw {{.*#+}} ymm6 = ymm14[0],ymm6[1],ymm14[2,3],ymm6[4],ymm14[5],ymm6[6],ymm14[7,8],ymm6[9],ymm14[10,11],ymm6[12],ymm14[13],ymm6[14],ymm14[15]
-; AVX512-NEXT:    vpshufb %xmm12, %xmm8, %xmm8
-; AVX512-NEXT:    vpbroadcastq 40(%rdi), %xmm12
-; AVX512-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm12[1],xmm8[2,3],xmm12[4],xmm8[5],xmm12[6],xmm8[7]
-; AVX512-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm6, %zmm8, %zmm24
-; AVX512-NEXT:    vmovdqa 32(%r8), %ymm6
-; AVX512-NEXT:    vpermq {{.*#+}} ymm8 = ymm6[0,1,1,1]
+; AVX512-NEXT:    vmovdqa64 %ymm20, %ymm8
+; AVX512-NEXT:    vpshufb %ymm8, %ymm0, %ymm8
+; AVX512-NEXT:    vpermq {{.*#+}} ymm14 = ymm6[1,1,2,2]
+; AVX512-NEXT:    vpblendw {{.*#+}} ymm8 = ymm14[0],ymm8[1],ymm14[2,3],ymm8[4],ymm14[5],ymm8[6],ymm14[7,8],ymm8[9],ymm14[10,11],ymm8[12],ymm14[13],ymm8[14],ymm14[15]
+; AVX512-NEXT:    vpshufb %xmm5, %xmm13, %xmm5
+; AVX512-NEXT:    vpbroadcastq 40(%rdi), %xmm13
+; AVX512-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm13[1],xmm5[2,3],xmm13[4],xmm5[5],xmm13[6],xmm5[7]
+; AVX512-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm24
+; AVX512-NEXT:    vmovdqa 32(%r8), %ymm5
+; AVX512-NEXT:    vpermq {{.*#+}} ymm8 = ymm5[0,1,1,1]
 ; AVX512-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
-; AVX512-NEXT:    vpshufb %ymm14, %ymm6, %ymm6
+; AVX512-NEXT:    vpshufb %ymm14, %ymm5, %ymm5
 ; AVX512-NEXT:    vpandn %ymm8, %ymm15, %ymm8
-; AVX512-NEXT:    vinserti64x4 $1, %ymm6, %zmm8, %zmm26
-; AVX512-NEXT:    vpshufb %xmm13, %xmm3, %xmm3
+; AVX512-NEXT:    vinserti64x4 $1, %ymm5, %zmm8, %zmm26
+; AVX512-NEXT:    vpshufb %xmm12, %xmm3, %xmm3
 ; AVX512-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT:    vmovdqa (%rcx), %xmm3
-; AVX512-NEXT:    vmovdqa (%rdx), %xmm6
-; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3]
-; AVX512-NEXT:    vpshufb %xmm13, %xmm12, %xmm8
-; AVX512-NEXT:    vmovdqa64 %ymm8, %ymm28
+; AVX512-NEXT:    vmovdqa (%rdx), %xmm5
+; AVX512-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX512-NEXT:    vpshufb %xmm12, %xmm13, %xmm8
+; AVX512-NEXT:    vmovdqa64 %ymm8, %ymm29
 ; AVX512-NEXT:    vpshufb %xmm7, %xmm3, %xmm3
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,2,2,2]
-; AVX512-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1],xmm3[2],xmm6[3],xmm3[4,5],xmm6[6],xmm3[7]
-; AVX512-NEXT:    vmovdqa64 %ymm3, %ymm23
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm3 = ymm5[0,1,2,1,4,5,6,5]
-; AVX512-NEXT:    vprolq $16, %ymm9, %ymm6
-; AVX512-NEXT:    vpblendw {{.*#+}} ymm3 = ymm6[0,1],ymm3[2],ymm6[3],ymm3[4],ymm6[5,6],ymm3[7],ymm6[8,9],ymm3[10],ymm6[11],ymm3[12],ymm6[13,14],ymm3[15]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,2,2,2]
+; AVX512-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm5[1],xmm3[2],xmm5[3],xmm3[4,5],xmm5[6],xmm3[7]
+; AVX512-NEXT:    vmovdqa64 %ymm3, %ymm28
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm3 = ymm4[0,1,2,1,4,5,6,5]
+; AVX512-NEXT:    vprolq $16, %ymm9, %ymm5
+; AVX512-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0,1],ymm3[2],ymm5[3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8,9],ymm3[10],ymm5[11],ymm3[12],ymm5[13,14],ymm3[15]
 ; AVX512-NEXT:    vmovdqa64 %ymm3, %ymm22
 ; AVX512-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm9[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,2,6,7,6,6]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
-; AVX512-NEXT:    vpblendw {{.*#+}} ymm13 = ymm3[0],ymm5[1],ymm3[2],ymm5[3],ymm3[4,5],ymm5[6],ymm3[7,8],ymm5[9],ymm3[10],ymm5[11],ymm3[12,13],ymm5[14],ymm3[15]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,3,2,3,6,7,6,7]
+; AVX512-NEXT:    vpblendw {{.*#+}} ymm13 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8],ymm4[9],ymm3[10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15]
 ; AVX512-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25,18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25]
 ; AVX512-NEXT:    # ymm7 = mem[0,1,0,1]
 ; AVX512-NEXT:    vpshufb %ymm7, %ymm11, %ymm3
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm6 = ymm17[1,1,1,2,5,5,5,6]
-; AVX512-NEXT:    vpblendw {{.*#+}} ymm12 = ymm3[0],ymm6[1],ymm3[2,3],ymm6[4],ymm3[5],ymm6[6],ymm3[7,8],ymm6[9],ymm3[10,11],ymm6[12],ymm3[13],ymm6[14],ymm3[15]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm5 = ymm17[1,1,1,2,5,5,5,6]
+; AVX512-NEXT:    vpblendw {{.*#+}} ymm12 = ymm3[0],ymm5[1],ymm3[2,3],ymm5[4],ymm3[5],ymm5[6],ymm3[7,8],ymm5[9],ymm3[10,11],ymm5[12],ymm3[13],ymm5[14],ymm3[15]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm11[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,3,6,7,6,7]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm6 = ymm17[3,2,3,3,7,6,7,7]
-; AVX512-NEXT:    vpblendw {{.*#+}} ymm11 = ymm3[0],ymm6[1],ymm3[2],ymm6[3,4],ymm3[5,6,7,8],ymm6[9],ymm3[10],ymm6[11,12],ymm3[13,14,15]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm3 = ymm4[0,1,2,1,4,5,6,5]
-; AVX512-NEXT:    vprolq $16, %ymm0, %ymm6
-; AVX512-NEXT:    vpblendw {{.*#+}} ymm15 = ymm6[0,1],ymm3[2],ymm6[3],ymm3[4],ymm6[5,6],ymm3[7],ymm6[8,9],ymm3[10],ymm6[11],ymm3[12],ymm6[13,14],ymm3[15]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm5 = ymm17[3,2,3,3,7,6,7,7]
+; AVX512-NEXT:    vpblendw {{.*#+}} ymm11 = ymm3[0],ymm5[1],ymm3[2],ymm5[3,4],ymm3[5,6,7,8],ymm5[9],ymm3[10],ymm5[11,12],ymm3[13,14,15]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm3 = ymm6[0,1,2,1,4,5,6,5]
+; AVX512-NEXT:    vprolq $16, %ymm0, %ymm5
+; AVX512-NEXT:    vpblendw {{.*#+}} ymm15 = ymm5[0,1],ymm3[2],ymm5[3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8,9],ymm3[10],ymm5[11],ymm3[12],ymm5[13,14],ymm3[15]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,2,6,7,6,6]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm3 = ymm4[2,3,2,3,6,7,6,7]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm3 = ymm6[2,3,2,3,6,7,6,7]
 ; AVX512-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4,5],ymm3[6],ymm0[7,8],ymm3[9],ymm0[10],ymm3[11],ymm0[12,13],ymm3[14],ymm0[15]
 ; AVX512-NEXT:    vmovdqa64 %ymm0, %ymm21
 ; AVX512-NEXT:    vpshufb %ymm7, %ymm1, %ymm0
@@ -6960,11 +6952,10 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm9
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
-; AVX512-NEXT:    vpbroadcastq 16(%r8), %ymm0
-; AVX512-NEXT:    vpandnq %ymm0, %ymm16, %ymm0
+; AVX512-NEXT:    vpandnq 16(%r8){1to4}, %ymm16, %ymm0
 ; AVX512-NEXT:    vmovdqa (%r8), %ymm10
 ; AVX512-NEXT:    vpshufb %ymm14, %ymm10, %ymm1
-; AVX512-NEXT:    vmovdqa64 %ymm14, %ymm29
+; AVX512-NEXT:    vmovdqa64 %ymm14, %ymm23
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm17
 ; AVX512-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX512-NEXT:    vpshufb %ymm5, %ymm2, %ymm0
@@ -6980,7 +6971,7 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[3,0,3,0,7,4,7,4]
 ; AVX512-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
-; AVX512-NEXT:    vpshufb %ymm7, %ymm2, %ymm5
+; AVX512-NEXT:    vpshufb %ymm7, %ymm2, %ymm4
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
 ; AVX512-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm31 # 64-byte Folded Reload
 ; AVX512-NEXT:    vpbroadcastq 88(%r8), %ymm1
@@ -7001,8 +6992,8 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-NEXT:    # ymm18 = mem[0,1,0,1]
 ; AVX512-NEXT:    vpermq $4, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
 ; AVX512-NEXT:    # ymm25 = mem[0,1,0,0]
-; AVX512-NEXT:    vpermq {{.*#+}} ymm27 = ymm28[0,1,0,1]
-; AVX512-NEXT:    vpermq {{.*#+}} ymm28 = ymm23[0,1,0,0]
+; AVX512-NEXT:    vpermq {{.*#+}} ymm27 = ymm29[0,1,0,1]
+; AVX512-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[0,1,0,0]
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,1,1,1]
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm14 = ymm22[2,3,2,3]
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,2]
@@ -7011,38 +7002,37 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm7 = ymm21[2,3,2,2]
 ; AVX512-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512-NEXT:    vpermq {{.*#+}} ymm4 = ymm8[2,2,3,2]
+; AVX512-NEXT:    vpermq {{.*#+}} ymm5 = ymm8[2,2,3,2]
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm8 = ymm3[1,1,1,2,5,5,5,6]
-; AVX512-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm8[1],ymm5[2,3],ymm8[4],ymm5[5],ymm8[6],ymm5[7,8],ymm8[9],ymm5[10,11],ymm8[12],ymm5[13],ymm8[14],ymm5[15]
-; AVX512-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
-; AVX512-NEXT:    vmovdqa 64(%r8), %ymm5
-; AVX512-NEXT:    vpermq {{.*#+}} ymm8 = ymm5[0,1,1,1]
-; AVX512-NEXT:    vmovdqa64 %ymm29, %ymm3
-; AVX512-NEXT:    vpshufb %ymm3, %ymm5, %ymm5
-; AVX512-NEXT:    vpbroadcastq 80(%r8), %ymm29
-; AVX512-NEXT:    vpandnq %ymm29, %ymm16, %ymm29
-; AVX512-NEXT:    vinserti64x4 $1, %ymm29, %zmm5, %zmm5
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm8[1],ymm4[2,3],ymm8[4],ymm4[5],ymm8[6],ymm4[7,8],ymm8[9],ymm4[10,11],ymm8[12],ymm4[13],ymm8[14],ymm4[15]
+; AVX512-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512-NEXT:    vmovdqa 64(%r8), %ymm4
+; AVX512-NEXT:    vpermq {{.*#+}} ymm8 = ymm4[0,1,1,1]
+; AVX512-NEXT:    vmovdqa64 %ymm23, %ymm3
+; AVX512-NEXT:    vpshufb %ymm3, %ymm4, %ymm4
+; AVX512-NEXT:    vpandnq 80(%r8){1to4}, %ymm16, %ymm29
+; AVX512-NEXT:    vinserti64x4 $1, %ymm29, %zmm4, %zmm4
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512-NEXT:    vpternlogq $248, %zmm22, %zmm31, %zmm23
-; AVX512-NEXT:    vpternlogq $248, %zmm22, %zmm24, %zmm26
+; AVX512-NEXT:    vpternlogq $248, %zmm21, %zmm31, %zmm23
+; AVX512-NEXT:    vpternlogq $248, %zmm21, %zmm24, %zmm26
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm25, %zmm18, %zmm18
-; AVX512-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm22 = mem[0,1,0,1,4,5,4,5]
+; AVX512-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm21 = mem[0,1,0,1,4,5,4,5]
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512-NEXT:    vpternlogq $226, %zmm18, %zmm24, %zmm22
+; AVX512-NEXT:    vpternlogq $226, %zmm18, %zmm24, %zmm21
 ; AVX512-NEXT:    vpbroadcastq 64(%r8), %ymm18
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm8, %zmm18, %zmm8
 ; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512-NEXT:    vpternlogd $184, %zmm22, %zmm18, %zmm8
-; AVX512-NEXT:    vinserti64x4 $1, %ymm28, %zmm27, %zmm22
-; AVX512-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Folded Reload
-; AVX512-NEXT:    # zmm21 = mem[0,1,0,1,4,5,4,5]
-; AVX512-NEXT:    vpternlogq $226, %zmm22, %zmm24, %zmm21
-; AVX512-NEXT:    vpbroadcastq (%r8), %ymm22
-; AVX512-NEXT:    vinserti64x4 $1, %ymm10, %zmm22, %zmm10
-; AVX512-NEXT:    vpternlogd $184, %zmm21, %zmm18, %zmm10
+; AVX512-NEXT:    vpternlogd $184, %zmm21, %zmm18, %zmm8
+; AVX512-NEXT:    vinserti64x4 $1, %ymm28, %zmm27, %zmm21
+; AVX512-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Folded Reload
+; AVX512-NEXT:    # zmm22 = mem[0,1,0,1,4,5,4,5]
+; AVX512-NEXT:    vpternlogq $226, %zmm21, %zmm24, %zmm22
+; AVX512-NEXT:    vpbroadcastq (%r8), %ymm21
+; AVX512-NEXT:    vinserti64x4 $1, %ymm10, %zmm21, %zmm10
+; AVX512-NEXT:    vpternlogd $184, %zmm22, %zmm18, %zmm10
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm13, %zmm14, %zmm13
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm11, %zmm12, %zmm11
 ; AVX512-NEXT:    vpternlogq $226, %zmm13, %zmm24, %zmm11
@@ -7051,18 +7041,18 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm12
 ; AVX512-NEXT:    vpternlogd $184, %zmm11, %zmm16, %zmm12
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm7, %zmm15, %zmm7
-; AVX512-NEXT:    vinserti64x4 $1, %ymm4, %zmm6, %zmm4
-; AVX512-NEXT:    vpternlogq $226, %zmm7, %zmm24, %zmm4
+; AVX512-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm5
+; AVX512-NEXT:    vpternlogq $226, %zmm7, %zmm24, %zmm5
 ; AVX512-NEXT:    vpbroadcastq 48(%r8), %ymm6
 ; AVX512-NEXT:    vpbroadcastq 56(%r8), %ymm7
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
-; AVX512-NEXT:    vpternlogd $184, %zmm4, %zmm16, %zmm6
+; AVX512-NEXT:    vpternlogd $184, %zmm5, %zmm16, %zmm6
 ; AVX512-NEXT:    vpternlogq $226, %zmm30, %zmm19, %zmm9
 ; AVX512-NEXT:    vpternlogq $226, %zmm20, %zmm19, %zmm0
-; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512-NEXT:    vpternlogq $248, %zmm4, %zmm9, %zmm17
-; AVX512-NEXT:    vpternlogq $248, %zmm4, %zmm0, %zmm5
-; AVX512-NEXT:    vmovdqa64 %zmm5, 384(%r9)
+; AVX512-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512-NEXT:    vpternlogq $248, %zmm5, %zmm9, %zmm17
+; AVX512-NEXT:    vpternlogq $248, %zmm5, %zmm0, %zmm4
+; AVX512-NEXT:    vmovdqa64 %zmm4, 384(%r9)
 ; AVX512-NEXT:    vmovdqa64 %zmm17, 64(%r9)
 ; AVX512-NEXT:    vmovdqa64 %zmm6, 256(%r9)
 ; AVX512-NEXT:    vmovdqa64 %zmm12, 576(%r9)
@@ -7080,8 +7070,8 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FCP:       # %bb.0:
 ; AVX512-FCP-NEXT:    subq $360, %rsp # imm = 0x168
 ; AVX512-FCP-NEXT:    vmovdqa 96(%rcx), %ymm8
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm15 = [u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u]
-; AVX512-FCP-NEXT:    vpshufb %ymm15, %ymm8, %ymm0
+; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm13 = [u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u]
+; AVX512-FCP-NEXT:    vpshufb %ymm13, %ymm8, %ymm0
 ; AVX512-FCP-NEXT:    vmovdqa64 96(%rdx), %ymm19
 ; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm19[3,0,3,0,7,4,7,4]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
@@ -7097,10 +7087,10 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512-FCP-NEXT:    vmovdqa 96(%rsi), %ymm3
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm12 = [u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u]
-; AVX512-FCP-NEXT:    vpshufb %ymm12, %ymm3, %ymm1
-; AVX512-FCP-NEXT:    vmovdqa64 96(%rdi), %ymm24
-; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm24[1,1,2,2]
+; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm9 = [u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u]
+; AVX512-FCP-NEXT:    vpshufb %ymm9, %ymm3, %ymm1
+; AVX512-FCP-NEXT:    vmovdqa64 96(%rdi), %ymm23
+; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm23[1,1,2,2]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
 ; AVX512-FCP-NEXT:    vmovdqa 96(%rsi), %xmm4
 ; AVX512-FCP-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -7112,17 +7102,17 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm2
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512-FCP-NEXT:    vmovdqa 96(%r8), %ymm2
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
-; AVX512-FCP-NEXT:    vpshufb %ymm13, %ymm2, %ymm4
+; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm15 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
+; AVX512-FCP-NEXT:    vpshufb %ymm15, %ymm2, %ymm4
 ; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,1,1]
 ; AVX512-FCP-NEXT:    vpandn %ymm2, %ymm5, %ymm2
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm5, %zmm9
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm5, %zmm7
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512-FCP-NEXT:    vmovdqa 32(%rcx), %ymm2
 ; AVX512-FCP-NEXT:    vmovdqu %ymm2, (%rsp) # 32-byte Spill
-; AVX512-FCP-NEXT:    vpshufb %ymm15, %ymm2, %ymm2
+; AVX512-FCP-NEXT:    vpshufb %ymm13, %ymm2, %ymm2
 ; AVX512-FCP-NEXT:    vmovdqa64 32(%rdx), %ymm29
 ; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm4 = ymm29[3,0,3,0,7,4,7,4]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4,5],ymm2[6],ymm4[7,8],ymm2[9],ymm4[10],ymm2[11],ymm4[12,13],ymm2[14],ymm4[15]
@@ -7144,7 +7134,7 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
 ; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
 ; AVX512-FCP-NEXT:    vpshufb %xmm5, %xmm2, %xmm2
-; AVX512-FCP-NEXT:    vmovdqa %xmm5, %xmm7
+; AVX512-FCP-NEXT:    vmovdqa %xmm5, %xmm10
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
 ; AVX512-FCP-NEXT:    vmovdqa (%rsi), %xmm4
 ; AVX512-FCP-NEXT:    vpshufb %xmm1, %xmm4, %xmm5
@@ -7152,229 +7142,228 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm6[1],xmm5[2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
 ; AVX512-FCP-NEXT:    vmovdqa (%rdi), %xmm6
 ; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; AVX512-FCP-NEXT:    vpshufb %xmm7, %xmm4, %xmm4
+; AVX512-FCP-NEXT:    vpshufb %xmm10, %xmm4, %xmm4
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512-FCP-NEXT:    vmovdqa 32(%rsi), %ymm14
-; AVX512-FCP-NEXT:    vpshufb %ymm12, %ymm14, %ymm5
-; AVX512-FCP-NEXT:    vmovdqa 32(%rdi), %ymm6
-; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[1,1,2,2]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm5 = ymm7[0],ymm5[1],ymm7[2,3],ymm5[4],ymm7[5],ymm5[6],ymm7[7,8],ymm5[9],ymm7[10,11],ymm5[12],ymm7[13],ymm5[14],ymm7[15]
-; AVX512-FCP-NEXT:    vmovdqa 32(%rsi), %xmm7
-; AVX512-FCP-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-FCP-NEXT:    vpshufb %xmm1, %xmm7, %xmm1
-; AVX512-FCP-NEXT:    vpbroadcastq 40(%rdi), %xmm7
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm7[1],xmm1[2,3],xmm7[4],xmm1[5],xmm7[6],xmm1[7]
+; AVX512-FCP-NEXT:    vmovdqa 32(%rsi), %ymm11
+; AVX512-FCP-NEXT:    vpshufb %ymm9, %ymm11, %ymm5
+; AVX512-FCP-NEXT:    vmovdqa64 32(%rdi), %ymm30
+; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm6 = ymm30[1,1,2,2]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5],ymm5[6],ymm6[7,8],ymm5[9],ymm6[10,11],ymm5[12],ymm6[13],ymm5[14],ymm6[15]
+; AVX512-FCP-NEXT:    vmovdqa 32(%rsi), %xmm6
+; AVX512-FCP-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-FCP-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
+; AVX512-FCP-NEXT:    vpbroadcastq 40(%rdi), %xmm6
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2,3],xmm6[4],xmm1[5],xmm6[6],xmm1[7]
 ; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm18
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm16
 ; AVX512-FCP-NEXT:    vmovdqa 32(%r8), %ymm1
 ; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm5 = ymm1[0,1,1,1]
-; AVX512-FCP-NEXT:    vpshufb %ymm13, %ymm1, %ymm1
-; AVX512-FCP-NEXT:    vpandn %ymm5, %ymm9, %ymm5
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm31
+; AVX512-FCP-NEXT:    vpshufb %ymm15, %ymm1, %ymm1
+; AVX512-FCP-NEXT:    vpandn %ymm5, %ymm7, %ymm5
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm17
 ; AVX512-FCP-NEXT:    vmovdqa 64(%rcx), %xmm1
 ; AVX512-FCP-NEXT:    vpshufb %xmm0, %xmm1, %xmm5
-; AVX512-FCP-NEXT:    vmovdqa 64(%rdx), %xmm7
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} xmm9 = xmm7[1,2,2,2]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm9[1],xmm5[2],xmm9[3],xmm5[4,5],xmm9[6],xmm5[7]
-; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm7 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512-FCP-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
-; AVX512-FCP-NEXT:    vmovdqa %xmm7, %xmm9
-; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,1,0,1,8,9,8,8]
-; AVX512-FCP-NEXT:    vpermt2q %zmm5, %zmm7, %zmm1
+; AVX512-FCP-NEXT:    vmovdqa 64(%rdx), %xmm6
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,2,2,2]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm7[1],xmm5[2],xmm7[3],xmm5[4,5],xmm7[6],xmm5[7]
+; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX512-FCP-NEXT:    vpshufb %xmm6, %xmm1, %xmm1
+; AVX512-FCP-NEXT:    vmovdqa %xmm6, %xmm7
+; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,1,0,1,8,9,8,8]
+; AVX512-FCP-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
 ; AVX512-FCP-NEXT:    vpermq {{.*#+}} zmm20 = zmm2[0,1,0,1,4,5,4,5]
-; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512-FCP-NEXT:    vpternlogq $226, %zmm1, %zmm21, %zmm20
+; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
+; AVX512-FCP-NEXT:    vpternlogq $226, %zmm1, %zmm31, %zmm20
 ; AVX512-FCP-NEXT:    vmovdqa (%rcx), %xmm1
 ; AVX512-FCP-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
 ; AVX512-FCP-NEXT:    vmovdqa (%rdx), %xmm2
 ; AVX512-FCP-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[1,2,2,2]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm5[1],xmm0[2],xmm5[3],xmm0[4,5],xmm5[6],xmm0[7]
 ; AVX512-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512-FCP-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
-; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm7, %zmm1
-; AVX512-FCP-NEXT:    vpermq {{.*#+}} zmm22 = zmm4[0,1,0,1,4,5,4,5]
-; AVX512-FCP-NEXT:    vpternlogq $226, %zmm1, %zmm21, %zmm22
+; AVX512-FCP-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
+; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm1
+; AVX512-FCP-NEXT:    vpermq {{.*#+}} zmm21 = zmm4[0,1,0,1,4,5,4,5]
+; AVX512-FCP-NEXT:    vpternlogq $226, %zmm1, %zmm31, %zmm21
 ; AVX512-FCP-NEXT:    vmovdqa (%rsi), %ymm2
 ; AVX512-FCP-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-FCP-NEXT:    vmovdqa64 (%rdi), %ymm30
-; AVX512-FCP-NEXT:    vpshufb %ymm12, %ymm2, %ymm0
-; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm30[1,1,2,2]
+; AVX512-FCP-NEXT:    vmovdqa64 (%rdi), %ymm28
+; AVX512-FCP-NEXT:    vpshufb %ymm9, %ymm2, %ymm0
+; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm28[1,1,2,2]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm30[0,1,2,1,4,5,6,5]
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm28[0,1,2,1,4,5,6,5]
 ; AVX512-FCP-NEXT:    vprolq $16, %ymm2, %ymm2
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3],ymm1[4],ymm2[5,6],ymm1[7],ymm2[8,9],ymm1[10],ymm2[11],ymm1[12],ymm2[13,14],ymm1[15]
 ; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512-FCP-NEXT:    vmovdqa (%rdx), %ymm5
-; AVX512-FCP-NEXT:    vmovdqa (%rcx), %ymm4
-; AVX512-FCP-NEXT:    vpshufb %ymm15, %ymm4, %ymm0
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm5[3,0,3,0,7,4,7,4]
+; AVX512-FCP-NEXT:    vmovdqa (%rdx), %ymm10
+; AVX512-FCP-NEXT:    vmovdqa (%rcx), %ymm6
+; AVX512-FCP-NEXT:    vpshufb %ymm13, %ymm6, %ymm0
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm10[3,0,3,0,7,4,7,4]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
 ; AVX512-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25,18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25]
 ; AVX512-FCP-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512-FCP-NEXT:    vpshufb %ymm7, %ymm4, %ymm1
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm2 = ymm5[1,1,1,2,5,5,5,6]
+; AVX512-FCP-NEXT:    vpshufb %ymm7, %ymm6, %ymm1
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm2 = ymm10[1,1,1,2,5,5,5,6]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5],ymm2[6],ymm1[7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13],ymm2[14],ymm1[15]
 ; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm26
-; AVX512-FCP-NEXT:    vpbroadcastq 16(%r8), %ymm0
-; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
-; AVX512-FCP-NEXT:    vpandnq %ymm0, %ymm17, %ymm0
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm25
+; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
+; AVX512-FCP-NEXT:    vpandnq 16(%r8){1to4}, %ymm12, %ymm0
 ; AVX512-FCP-NEXT:    vmovdqa (%r8), %ymm2
-; AVX512-FCP-NEXT:    vpshufb %ymm13, %ymm2, %ymm1
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm2, %ymm27
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm28
-; AVX512-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29]
-; AVX512-FCP-NEXT:    # ymm11 = mem[0,1,0,1]
-; AVX512-FCP-NEXT:    vpshufb %ymm11, %ymm3, %ymm0
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm24[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT:    vpshufb %ymm15, %ymm2, %ymm1
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm2, %ymm26
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm27
+; AVX512-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29]
+; AVX512-FCP-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512-FCP-NEXT:    vpshufb %ymm4, %ymm3, %ymm0
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm23[2,3,2,3,6,7,6,7]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8],ymm1[9],ymm0[10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
 ; AVX512-FCP-NEXT:    vprolq $16, %ymm3, %ymm1
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm2 = ymm24[0,1,2,1,4,5,6,5]
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm2 = ymm23[0,1,2,1,4,5,6,5]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8,9],ymm2[10],ymm1[11],ymm2[12],ymm1[13,14],ymm2[15]
-; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [2,3,2,3,10,11,10,10]
-; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm23, %zmm1
-; AVX512-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm9 = [26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31]
-; AVX512-FCP-NEXT:    vpshufb %ymm9, %ymm8, %ymm0
+; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [2,3,2,3,10,11,10,10]
+; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm22, %zmm1
+; AVX512-FCP-NEXT:    vpshufb {{.*#+}} ymm0 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31]
 ; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm2 = ymm19[3,2,3,3,7,6,7,7]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3,4],ymm0[5,6,7,8],ymm2[9],ymm0[10],ymm2[11,12],ymm0[13,14,15]
 ; AVX512-FCP-NEXT:    vpshufb %ymm7, %ymm8, %ymm2
 ; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm3 = ymm19[1,1,1,2,5,5,5,6]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5],ymm3[6],ymm2[7,8],ymm3[9],ymm2[10,11],ymm3[12],ymm2[13],ymm3[14],ymm2[15]
-; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [2,3,2,3,10,10,11,10]
-; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm19, %zmm2
-; AVX512-FCP-NEXT:    vpternlogq $226, %zmm1, %zmm21, %zmm2
+; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [2,3,2,3,10,10,11,10]
+; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm18, %zmm2
+; AVX512-FCP-NEXT:    vpternlogq $226, %zmm1, %zmm31, %zmm2
 ; AVX512-FCP-NEXT:    vpbroadcastq 112(%r8), %ymm0
 ; AVX512-FCP-NEXT:    vpbroadcastq 120(%r8), %ymm1
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm16
-; AVX512-FCP-NEXT:    vpternlogd $184, %zmm2, %zmm17, %zmm16
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm14
+; AVX512-FCP-NEXT:    vpternlogd $184, %zmm2, %zmm12, %zmm14
 ; AVX512-FCP-NEXT:    vmovdqa 64(%rsi), %ymm8
-; AVX512-FCP-NEXT:    vpshufb %ymm12, %ymm8, %ymm0
-; AVX512-FCP-NEXT:    vmovdqa64 64(%rdi), %ymm24
-; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm24[1,1,2,2]
+; AVX512-FCP-NEXT:    vpshufb %ymm9, %ymm8, %ymm0
+; AVX512-FCP-NEXT:    vmovdqa64 64(%rdi), %ymm23
+; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm23[1,1,2,2]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm24[0,1,2,1,4,5,6,5]
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm23[0,1,2,1,4,5,6,5]
 ; AVX512-FCP-NEXT:    vprolq $16, %ymm8, %ymm2
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3],ymm1[4],ymm2[5,6],ymm1[7],ymm2[8,9],ymm1[10],ymm2[11],ymm1[12],ymm2[13,14],ymm1[15]
 ; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm25
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm19
 ; AVX512-FCP-NEXT:    vmovdqa 64(%rcx), %ymm1
-; AVX512-FCP-NEXT:    vpshufb %ymm15, %ymm1, %ymm2
-; AVX512-FCP-NEXT:    vmovdqa 64(%rdx), %ymm0
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm12 = ymm0[3,0,3,0,7,4,7,4]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm12[0],ymm2[1],ymm12[2],ymm2[3],ymm12[4,5],ymm2[6],ymm12[7,8],ymm2[9],ymm12[10],ymm2[11],ymm12[12,13],ymm2[14],ymm12[15]
-; AVX512-FCP-NEXT:    vpshufb %ymm7, %ymm1, %ymm12
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm15 = ymm0[1,1,1,2,5,5,5,6]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0],ymm15[1],ymm12[2,3],ymm15[4],ymm12[5],ymm15[6],ymm12[7,8],ymm15[9],ymm12[10,11],ymm15[12],ymm12[13],ymm15[14],ymm12[15]
-; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm12, %zmm2, %zmm12
-; AVX512-FCP-NEXT:    vpbroadcastq 80(%r8), %ymm2
-; AVX512-FCP-NEXT:    vpandnq %ymm2, %ymm17, %ymm15
-; AVX512-FCP-NEXT:    vmovdqa 64(%r8), %ymm3
-; AVX512-FCP-NEXT:    vpshufb %ymm13, %ymm3, %ymm13
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm15, %zmm13, %zmm15
-; AVX512-FCP-NEXT:    vpshufb %ymm11, %ymm14, %ymm13
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm10 = ymm6[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm10 = ymm13[0],ymm10[1],ymm13[2],ymm10[3],ymm13[4,5],ymm10[6],ymm13[7,8],ymm10[9],ymm13[10],ymm10[11],ymm13[12,13],ymm10[14],ymm13[15]
-; AVX512-FCP-NEXT:    vprolq $16, %ymm14, %ymm13
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,1,2,1,4,5,6,5]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm6 = ymm13[0,1],ymm6[2],ymm13[3],ymm6[4],ymm13[5,6],ymm6[7],ymm13[8,9],ymm6[10],ymm13[11],ymm6[12],ymm13[13,14],ymm6[15]
-; AVX512-FCP-NEXT:    vpermt2q %zmm10, %zmm23, %zmm6
-; AVX512-FCP-NEXT:    vmovdqu (%rsp), %ymm14 # 32-byte Reload
-; AVX512-FCP-NEXT:    vpshufb %ymm9, %ymm14, %ymm10
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm13 = ymm29[3,2,3,3,7,6,7,7]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0],ymm13[1],ymm10[2],ymm13[3,4],ymm10[5,6,7,8],ymm13[9],ymm10[10],ymm13[11,12],ymm10[13,14,15]
-; AVX512-FCP-NEXT:    vpshufb %ymm7, %ymm14, %ymm7
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm13 = ymm29[1,1,1,2,5,5,5,6]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0],ymm13[1],ymm7[2,3],ymm13[4],ymm7[5],ymm13[6],ymm7[7,8],ymm13[9],ymm7[10,11],ymm13[12],ymm7[13],ymm13[14],ymm7[15]
-; AVX512-FCP-NEXT:    vpermt2q %zmm10, %zmm19, %zmm7
-; AVX512-FCP-NEXT:    vpternlogq $226, %zmm6, %zmm21, %zmm7
-; AVX512-FCP-NEXT:    vpbroadcastq 48(%r8), %ymm6
-; AVX512-FCP-NEXT:    vpbroadcastq 56(%r8), %ymm10
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm10, %zmm6, %zmm6
-; AVX512-FCP-NEXT:    vpternlogd $184, %zmm7, %zmm17, %zmm6
-; AVX512-FCP-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512-FCP-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
-; AVX512-FCP-NEXT:    # xmm7 = xmm7[0],mem[0],xmm7[1],mem[1],xmm7[2],mem[2],xmm7[3],mem[3]
-; AVX512-FCP-NEXT:    vpshufb %ymm9, %ymm1, %ymm1
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[3,2,3,3,7,6,7,7]
+; AVX512-FCP-NEXT:    vpshufb %ymm13, %ymm1, %ymm2
+; AVX512-FCP-NEXT:    vmovdqa 64(%rdx), %ymm3
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm9 = ymm3[3,0,3,0,7,4,7,4]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm9[0],ymm2[1],ymm9[2],ymm2[3],ymm9[4,5],ymm2[6],ymm9[7,8],ymm2[9],ymm9[10],ymm2[11],ymm9[12,13],ymm2[14],ymm9[15]
+; AVX512-FCP-NEXT:    vpshufb %ymm7, %ymm1, %ymm9
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm13 = ymm3[1,1,1,2,5,5,5,6]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0],ymm13[1],ymm9[2,3],ymm13[4],ymm9[5],ymm13[6],ymm9[7,8],ymm13[9],ymm9[10,11],ymm13[12],ymm9[13],ymm13[14],ymm9[15]
+; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm9, %zmm2, %zmm9
+; AVX512-FCP-NEXT:    vmovdqa 64(%r8), %ymm5
+; AVX512-FCP-NEXT:    vpshufb %ymm15, %ymm5, %ymm13
+; AVX512-FCP-NEXT:    vpandnq 80(%r8){1to4}, %ymm12, %ymm24
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm24, %zmm13, %zmm24
+; AVX512-FCP-NEXT:    vpshufb %ymm4, %ymm11, %ymm13
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm15 = ymm30[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm15[1],ymm13[2],ymm15[3],ymm13[4,5],ymm15[6],ymm13[7,8],ymm15[9],ymm13[10],ymm15[11],ymm13[12,13],ymm15[14],ymm13[15]
+; AVX512-FCP-NEXT:    vprolq $16, %ymm11, %ymm11
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm15 = ymm30[0,1,2,1,4,5,6,5]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm15[2],ymm11[3],ymm15[4],ymm11[5,6],ymm15[7],ymm11[8,9],ymm15[10],ymm11[11],ymm15[12],ymm11[13,14],ymm15[15]
+; AVX512-FCP-NEXT:    vpermt2q %zmm13, %zmm22, %zmm11
+; AVX512-FCP-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
+; AVX512-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31]
+; AVX512-FCP-NEXT:    vpshufb %ymm2, %ymm0, %ymm13
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm15 = ymm29[3,2,3,3,7,6,7,7]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm15[1],ymm13[2],ymm15[3,4],ymm13[5,6,7,8],ymm15[9],ymm13[10],ymm15[11,12],ymm13[13,14,15]
+; AVX512-FCP-NEXT:    vpshufb %ymm7, %ymm0, %ymm7
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm15 = ymm29[1,1,1,2,5,5,5,6]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm15 = ymm7[0],ymm15[1],ymm7[2,3],ymm15[4],ymm7[5],ymm15[6],ymm7[7,8],ymm15[9],ymm7[10,11],ymm15[12],ymm7[13],ymm15[14],ymm7[15]
+; AVX512-FCP-NEXT:    vpermt2q %zmm13, %zmm18, %zmm15
+; AVX512-FCP-NEXT:    vpternlogq $226, %zmm11, %zmm31, %zmm15
+; AVX512-FCP-NEXT:    vpbroadcastq 48(%r8), %ymm7
+; AVX512-FCP-NEXT:    vpbroadcastq 56(%r8), %ymm11
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm11, %zmm7, %zmm7
+; AVX512-FCP-NEXT:    vpternlogd $184, %zmm15, %zmm12, %zmm7
+; AVX512-FCP-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX512-FCP-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload
+; AVX512-FCP-NEXT:    # xmm11 = xmm11[0],mem[0],xmm11[1],mem[1],xmm11[2],mem[2],xmm11[3],mem[3]
+; AVX512-FCP-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm0 = ymm3[3,2,3,3,7,6,7,7]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3,4],ymm1[5,6,7,8],ymm0[9],ymm1[10],ymm0[11,12],ymm1[13,14,15]
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm14 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512-FCP-NEXT:    vpshufb %xmm14, %xmm7, %xmm1
-; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [2,2,3,2,8,9,8,9]
-; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm7, %zmm0
+; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm13 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX512-FCP-NEXT:    vpshufb %xmm13, %xmm11, %xmm1
+; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [2,2,3,2,8,9,8,9]
+; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm11, %zmm0
 ; AVX512-FCP-NEXT:    vmovdqa 96(%rdi), %xmm1
 ; AVX512-FCP-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
 ; AVX512-FCP-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
-; AVX512-FCP-NEXT:    vpshufb %ymm11, %ymm8, %ymm8
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm10 = ymm24[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm10[1],ymm8[2],ymm10[3],ymm8[4,5],ymm10[6],ymm8[7,8],ymm10[9],ymm8[10],ymm10[11],ymm8[12,13],ymm10[14],ymm8[15]
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
-; AVX512-FCP-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
-; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [2,3,2,2,8,9,8,9]
-; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm10, %zmm8
+; AVX512-FCP-NEXT:    vmovdqa %ymm4, %ymm15
+; AVX512-FCP-NEXT:    vpshufb %ymm4, %ymm8, %ymm8
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm12 = ymm23[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm12[1],ymm8[2],ymm12[3],ymm8[4,5],ymm12[6],ymm8[7,8],ymm12[9],ymm8[10],ymm12[11],ymm8[12,13],ymm12[14],ymm8[15]
+; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
+; AVX512-FCP-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [2,3,2,2,8,9,8,9]
+; AVX512-FCP-NEXT:    vpermt2q %zmm1, %zmm12, %zmm8
 ; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
 ; AVX512-FCP-NEXT:    vpternlogq $226, %zmm0, %zmm1, %zmm8
 ; AVX512-FCP-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX512-FCP-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
 ; AVX512-FCP-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX512-FCP-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
-; AVX512-FCP-NEXT:    vpshufb %ymm9, %ymm4, %ymm4
-; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm3[0,1,1,1]
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[3,2,3,3,7,6,7,7]
+; AVX512-FCP-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
+; AVX512-FCP-NEXT:    vpshufb %ymm2, %ymm6, %ymm4
+; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm5[0,1,1,1]
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm5 = ymm10[3,2,3,3,7,6,7,7]
 ; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2],ymm5[3,4],ymm4[5,6,7,8],ymm5[9],ymm4[10],ymm5[11,12],ymm4[13,14,15]
-; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm7, %zmm4
+; AVX512-FCP-NEXT:    vpermt2q %zmm0, %zmm11, %zmm4
 ; AVX512-FCP-NEXT:    vpbroadcastq 88(%r8), %ymm0
 ; AVX512-FCP-NEXT:    vpbroadcastq 96(%r8), %ymm5
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
 ; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535]
 ; AVX512-FCP-NEXT:    vpternlogd $184, %zmm8, %zmm5, %zmm0
-; AVX512-FCP-NEXT:    vmovdqa 32(%rdi), %xmm7
-; AVX512-FCP-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
-; AVX512-FCP-NEXT:    # xmm7 = xmm7[0],mem[0],xmm7[1],mem[1],xmm7[2],mem[2],xmm7[3],mem[3]
-; AVX512-FCP-NEXT:    vpshufb %xmm13, %xmm7, %xmm7
+; AVX512-FCP-NEXT:    vmovdqa 32(%rdi), %xmm8
+; AVX512-FCP-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
+; AVX512-FCP-NEXT:    # xmm8 = xmm8[0],mem[0],xmm8[1],mem[1],xmm8[2],mem[2],xmm8[3],mem[3]
+; AVX512-FCP-NEXT:    vpshufb %xmm3, %xmm8, %xmm8
 ; AVX512-FCP-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512-FCP-NEXT:    vpshufb %ymm11, %ymm3, %ymm8
-; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm9 = ymm27[0,1,1,1]
-; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm11 = ymm30[2,3,2,3,6,7,6,7]
-; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm11[1],ymm8[2],ymm11[3],ymm8[4,5],ymm11[6],ymm8[7,8],ymm11[9],ymm8[10],ymm11[11],ymm8[12,13],ymm11[14],ymm8[15]
-; AVX512-FCP-NEXT:    vpermt2q %zmm7, %zmm10, %zmm8
-; AVX512-FCP-NEXT:    vpternlogq $226, %zmm4, %zmm1, %zmm8
+; AVX512-FCP-NEXT:    vpshufb %ymm15, %ymm3, %ymm6
+; AVX512-FCP-NEXT:    vpermq {{.*#+}} ymm10 = ymm26[0,1,1,1]
+; AVX512-FCP-NEXT:    vpshufd {{.*#+}} ymm11 = ymm28[2,3,2,3,6,7,6,7]
+; AVX512-FCP-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0],ymm11[1],ymm6[2],ymm11[3],ymm6[4,5],ymm11[6],ymm6[7,8],ymm11[9],ymm6[10],ymm11[11],ymm6[12,13],ymm11[14],ymm6[15]
+; AVX512-FCP-NEXT:    vpermt2q %zmm8, %zmm12, %zmm6
+; AVX512-FCP-NEXT:    vpternlogq $226, %zmm4, %zmm1, %zmm6
 ; AVX512-FCP-NEXT:    vpbroadcastq 24(%r8), %ymm4
-; AVX512-FCP-NEXT:    vpbroadcastq 32(%r8), %ymm7
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm4
-; AVX512-FCP-NEXT:    vpternlogd $184, %zmm8, %zmm5, %zmm4
+; AVX512-FCP-NEXT:    vpbroadcastq 32(%r8), %ymm8
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm8, %zmm4, %zmm4
+; AVX512-FCP-NEXT:    vpternlogd $184, %zmm6, %zmm5, %zmm4
 ; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0]
 ; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm3 # 64-byte Folded Reload
-; AVX512-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm18 # 64-byte Folded Reload
-; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm16 # 64-byte Folded Reload
 ; AVX512-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512-FCP-NEXT:    vpternlogq $248, %zmm5, %zmm3, %zmm8
-; AVX512-FCP-NEXT:    vpternlogq $248, %zmm5, %zmm18, %zmm31
+; AVX512-FCP-NEXT:    vpternlogq $248, %zmm5, %zmm16, %zmm17
 ; AVX512-FCP-NEXT:    vpbroadcastq 64(%r8), %ymm5
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
 ; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
 ; AVX512-FCP-NEXT:    vpternlogd $184, %zmm20, %zmm5, %zmm2
-; AVX512-FCP-NEXT:    vpbroadcastq (%r8), %ymm7
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
-; AVX512-FCP-NEXT:    vpternlogd $184, %zmm22, %zmm5, %zmm7
-; AVX512-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm26 # 64-byte Folded Reload
-; AVX512-FCP-NEXT:    vpternlogq $226, %zmm25, %zmm1, %zmm12
+; AVX512-FCP-NEXT:    vpbroadcastq (%r8), %ymm6
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm10, %zmm6, %zmm6
+; AVX512-FCP-NEXT:    vpternlogd $184, %zmm21, %zmm5, %zmm6
+; AVX512-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm25 # 64-byte Folded Reload
+; AVX512-FCP-NEXT:    vpternlogq $226, %zmm19, %zmm1, %zmm9
 ; AVX512-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512-FCP-NEXT:    vpternlogq $248, %zmm1, %zmm26, %zmm28
-; AVX512-FCP-NEXT:    vpternlogq $248, %zmm1, %zmm12, %zmm15
+; AVX512-FCP-NEXT:    vpternlogq $248, %zmm1, %zmm25, %zmm27
+; AVX512-FCP-NEXT:    vpternlogq $248, %zmm1, %zmm9, %zmm24
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm4, 128(%r9)
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm6, 256(%r9)
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm7, 256(%r9)
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, 448(%r9)
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm15, 384(%r9)
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm16, 576(%r9)
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm28, 64(%r9)
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm7, (%r9)
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm24, 384(%r9)
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm14, 576(%r9)
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm27, 64(%r9)
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm6, (%r9)
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, 320(%r9)
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm31, 192(%r9)
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm17, 192(%r9)
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm8, 512(%r9)
 ; AVX512-FCP-NEXT:    addq $360, %rsp # imm = 0x168
 ; AVX512-FCP-NEXT:    vzeroupper
@@ -7403,27 +7392,27 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm3 = [u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u]
 ; AVX512DQ-NEXT:    vpshufb %ymm3, %ymm9, %ymm1
 ; AVX512DQ-NEXT:    vmovdqa64 %ymm3, %ymm20
-; AVX512DQ-NEXT:    vmovdqa 96(%rdi), %ymm5
-; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm3 = ymm5[1,1,2,2]
+; AVX512DQ-NEXT:    vmovdqa 96(%rdi), %ymm4
+; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm3 = ymm4[1,1,2,2]
 ; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm3 = ymm3[0],ymm1[1],ymm3[2,3],ymm1[4],ymm3[5],ymm1[6],ymm3[7,8],ymm1[9],ymm3[10,11],ymm1[12],ymm3[13],ymm1[14],ymm3[15]
 ; AVX512DQ-NEXT:    vmovdqa 96(%rsi), %xmm1
-; AVX512DQ-NEXT:    vmovdqa {{.*#+}} xmm12 = [6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13]
-; AVX512DQ-NEXT:    vpshufb %xmm12, %xmm1, %xmm4
-; AVX512DQ-NEXT:    vpbroadcastq 104(%rdi), %xmm6
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} xmm4 = xmm4[0],xmm6[1],xmm4[2,3],xmm6[4],xmm4[5],xmm6[6],xmm4[7]
-; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,1]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} xmm5 = [6,7,u,u,10,11,6,7,u,u,8,9,u,u,12,13]
+; AVX512DQ-NEXT:    vpshufb %xmm5, %xmm1, %xmm6
+; AVX512DQ-NEXT:    vpbroadcastq 104(%rdi), %xmm8
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2,3],xmm8[4],xmm6[5],xmm8[6],xmm6[7]
+; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm3, %zmm6, %zmm3
 ; AVX512DQ-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-NEXT:    vmovdqa 96(%r8), %ymm3
-; AVX512DQ-NEXT:    vpshufb {{.*#+}} ymm4 = zero,zero,zero,zero,ymm3[12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[16,17],zero,zero,zero,zero,zero,zero
+; AVX512DQ-NEXT:    vpshufb {{.*#+}} ymm6 = zero,zero,zero,zero,ymm3[12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,ymm3[16,17],zero,zero,zero,zero,zero,zero
 ; AVX512DQ-NEXT:    vmovdqa64 {{.*#+}} zmm15 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,1,1,1]
 ; AVX512DQ-NEXT:    vpandn %ymm3, %ymm15, %ymm3
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm3, %zmm3
 ; AVX512DQ-NEXT:    vmovdqu64 %zmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX512DQ-NEXT:    vmovdqa {{.*#+}} xmm13 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512DQ-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
+; AVX512DQ-NEXT:    vmovdqa {{.*#+}} xmm12 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX512DQ-NEXT:    vpshufb %xmm12, %xmm0, %xmm0
 ; AVX512DQ-NEXT:    vmovdqa64 (%rdx), %ymm16
 ; AVX512DQ-NEXT:    vmovdqa 64(%rdx), %ymm2
 ; AVX512DQ-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -7458,7 +7447,7 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4,5],xmm0[6],xmm1[7]
 ; AVX512DQ-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT:    vmovdqa 64(%rsi), %xmm0
-; AVX512DQ-NEXT:    vpshufb %xmm12, %xmm0, %xmm1
+; AVX512DQ-NEXT:    vpshufb %xmm5, %xmm0, %xmm1
 ; AVX512DQ-NEXT:    vpbroadcastq 72(%rdi), %xmm2
 ; AVX512DQ-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
 ; AVX512DQ-NEXT:    vmovdqa 64(%rdi), %xmm2
@@ -7468,12 +7457,12 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,5,6]
 ; AVX512DQ-NEXT:    vinserti32x4 $2, %xmm1, %zmm2, %zmm1
 ; AVX512DQ-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT:    vpshufb %xmm12, %xmm0, %xmm1
+; AVX512DQ-NEXT:    vpshufb %xmm5, %xmm0, %xmm1
 ; AVX512DQ-NEXT:    vpbroadcastq 8(%rdi), %xmm2
 ; AVX512DQ-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
 ; AVX512DQ-NEXT:    vmovdqa (%rdi), %xmm2
 ; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm4 = ymm16[3,2,3,3,7,6,7,7]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm8 = ymm16[3,2,3,3,7,6,7,7]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,5,6]
 ; AVX512DQ-NEXT:    vinserti32x4 $2, %xmm1, %zmm0, %zmm0
@@ -7481,11 +7470,11 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-NEXT:    vmovdqa (%rcx), %ymm2
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm2[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2],ymm4[3,4],ymm0[5,6,7,8],ymm4[9],ymm0[10],ymm4[11,12],ymm0[13,14,15]
-; AVX512DQ-NEXT:    vmovdqa 32(%rdx), %xmm4
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm8[1],ymm0[2],ymm8[3,4],ymm0[5,6,7,8],ymm8[9],ymm0[10],ymm8[11,12],ymm0[13,14,15]
+; AVX512DQ-NEXT:    vmovdqa 32(%rdx), %xmm8
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,2,3,2]
-; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; AVX512DQ-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3]
+; AVX512DQ-NEXT:    vpshufb %xmm12, %xmm1, %xmm1
 ; AVX512DQ-NEXT:    vshufi64x2 {{.*#+}} zmm25 = zmm0[0,1,2,3],zmm1[0,1,0,1]
 ; AVX512DQ-NEXT:    vmovdqa64 (%rdi), %ymm30
 ; AVX512DQ-NEXT:    vmovdqa (%rsi), %ymm10
@@ -7494,9 +7483,9 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm1 = ymm30[2,3,2,3,6,7,6,7]
 ; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8],ymm1[9],ymm0[10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,2]
-; AVX512DQ-NEXT:    vmovdqa 32(%rsi), %xmm8
+; AVX512DQ-NEXT:    vmovdqa 32(%rsi), %xmm13
 ; AVX512DQ-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1],xmm1[2],xmm8[2],xmm1[3],xmm8[3]
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,6]
 ; AVX512DQ-NEXT:    vshufi64x2 {{.*#+}} zmm27 = zmm0[0,1,2,3],zmm1[0,1,0,1]
@@ -7506,62 +7495,62 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm14 = ymm19[3,0,3,0,7,4,7,4]
 ; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm0 = ymm14[0],ymm0[1],ymm14[2],ymm0[3],ymm14[4,5],ymm0[6],ymm14[7,8],ymm0[9],ymm14[10],ymm0[11],ymm14[12,13],ymm0[14],ymm14[15]
 ; AVX512DQ-NEXT:    vpshufb %xmm7, %xmm6, %xmm6
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,2,2,2]
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} xmm4 = xmm6[0],xmm4[1],xmm6[2],xmm4[3],xmm6[4,5],xmm4[6],xmm6[7]
-; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[0,1,0,0]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm0, %zmm4, %zmm0
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm8 = xmm8[1,2,2,2]
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} xmm6 = xmm6[0],xmm8[1],xmm6[2],xmm8[3],xmm6[4,5],xmm8[6],xmm6[7]
+; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[0,1,0,0]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm0, %zmm6, %zmm0
 ; AVX512DQ-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-NEXT:    vmovdqa 32(%rdi), %ymm4
+; AVX512DQ-NEXT:    vmovdqa 32(%rdi), %ymm6
 ; AVX512DQ-NEXT:    vmovdqa 32(%rsi), %ymm0
-; AVX512DQ-NEXT:    vmovdqa64 %ymm20, %ymm6
-; AVX512DQ-NEXT:    vpshufb %ymm6, %ymm0, %ymm6
-; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm14 = ymm4[1,1,2,2]
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm6 = ymm14[0],ymm6[1],ymm14[2,3],ymm6[4],ymm14[5],ymm6[6],ymm14[7,8],ymm6[9],ymm14[10,11],ymm6[12],ymm14[13],ymm6[14],ymm14[15]
-; AVX512DQ-NEXT:    vpshufb %xmm12, %xmm8, %xmm8
-; AVX512DQ-NEXT:    vpbroadcastq 40(%rdi), %xmm12
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} xmm8 = xmm8[0],xmm12[1],xmm8[2,3],xmm12[4],xmm8[5],xmm12[6],xmm8[7]
-; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm8 = ymm8[0,1,0,1]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm8, %zmm24
-; AVX512DQ-NEXT:    vmovdqa 32(%r8), %ymm6
-; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm8 = ymm6[0,1,1,1]
+; AVX512DQ-NEXT:    vmovdqa64 %ymm20, %ymm8
+; AVX512DQ-NEXT:    vpshufb %ymm8, %ymm0, %ymm8
+; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm14 = ymm6[1,1,2,2]
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm8 = ymm14[0],ymm8[1],ymm14[2,3],ymm8[4],ymm14[5],ymm8[6],ymm14[7,8],ymm8[9],ymm14[10,11],ymm8[12],ymm14[13],ymm8[14],ymm14[15]
+; AVX512DQ-NEXT:    vpshufb %xmm5, %xmm13, %xmm5
+; AVX512DQ-NEXT:    vpbroadcastq 40(%rdi), %xmm13
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm13[1],xmm5[2,3],xmm13[4],xmm5[5],xmm13[6],xmm5[7]
+; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[0,1,0,1]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm24
+; AVX512DQ-NEXT:    vmovdqa 32(%r8), %ymm5
+; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm8 = ymm5[0,1,1,1]
 ; AVX512DQ-NEXT:    vmovdqa {{.*#+}} ymm14 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
-; AVX512DQ-NEXT:    vpshufb %ymm14, %ymm6, %ymm6
+; AVX512DQ-NEXT:    vpshufb %ymm14, %ymm5, %ymm5
 ; AVX512DQ-NEXT:    vpandn %ymm8, %ymm15, %ymm8
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm8, %zmm26
-; AVX512DQ-NEXT:    vpshufb %xmm13, %xmm3, %xmm3
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm5, %zmm8, %zmm26
+; AVX512DQ-NEXT:    vpshufb %xmm12, %xmm3, %xmm3
 ; AVX512DQ-NEXT:    vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT:    vmovdqa (%rcx), %xmm3
-; AVX512DQ-NEXT:    vmovdqa (%rdx), %xmm6
-; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} xmm12 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3]
-; AVX512DQ-NEXT:    vpshufb %xmm13, %xmm12, %xmm8
-; AVX512DQ-NEXT:    vmovdqa64 %ymm8, %ymm28
+; AVX512DQ-NEXT:    vmovdqa (%rdx), %xmm5
+; AVX512DQ-NEXT:    vpunpcklwd {{.*#+}} xmm13 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
+; AVX512DQ-NEXT:    vpshufb %xmm12, %xmm13, %xmm8
+; AVX512DQ-NEXT:    vmovdqa64 %ymm8, %ymm29
 ; AVX512DQ-NEXT:    vpshufb %xmm7, %xmm3, %xmm3
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm6 = xmm6[1,2,2,2]
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm6[1],xmm3[2],xmm6[3],xmm3[4,5],xmm6[6],xmm3[7]
-; AVX512DQ-NEXT:    vmovdqa64 %ymm3, %ymm23
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm3 = ymm5[0,1,2,1,4,5,6,5]
-; AVX512DQ-NEXT:    vprolq $16, %ymm9, %ymm6
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm3 = ymm6[0,1],ymm3[2],ymm6[3],ymm3[4],ymm6[5,6],ymm3[7],ymm6[8,9],ymm3[10],ymm6[11],ymm3[12],ymm6[13,14],ymm3[15]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,2,2,2]
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0],xmm5[1],xmm3[2],xmm5[3],xmm3[4,5],xmm5[6],xmm3[7]
+; AVX512DQ-NEXT:    vmovdqa64 %ymm3, %ymm28
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm3 = ymm4[0,1,2,1,4,5,6,5]
+; AVX512DQ-NEXT:    vprolq $16, %ymm9, %ymm5
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm3 = ymm5[0,1],ymm3[2],ymm5[3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8,9],ymm3[10],ymm5[11],ymm3[12],ymm5[13,14],ymm3[15]
 ; AVX512DQ-NEXT:    vmovdqa64 %ymm3, %ymm22
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm9[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,2,6,7,6,6]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm13 = ymm3[0],ymm5[1],ymm3[2],ymm5[3],ymm3[4,5],ymm5[6],ymm3[7,8],ymm5[9],ymm3[10],ymm5[11],ymm3[12,13],ymm5[14],ymm3[15]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm4 = ymm4[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm13 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4,5],ymm4[6],ymm3[7,8],ymm4[9],ymm3[10],ymm4[11],ymm3[12,13],ymm4[14],ymm3[15]
 ; AVX512DQ-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25,18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25]
 ; AVX512DQ-NEXT:    # ymm7 = mem[0,1,0,1]
 ; AVX512DQ-NEXT:    vpshufb %ymm7, %ymm11, %ymm3
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm6 = ymm17[1,1,1,2,5,5,5,6]
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm12 = ymm3[0],ymm6[1],ymm3[2,3],ymm6[4],ymm3[5],ymm6[6],ymm3[7,8],ymm6[9],ymm3[10,11],ymm6[12],ymm3[13],ymm6[14],ymm3[15]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm5 = ymm17[1,1,1,2,5,5,5,6]
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm12 = ymm3[0],ymm5[1],ymm3[2,3],ymm5[4],ymm3[5],ymm5[6],ymm3[7,8],ymm5[9],ymm3[10,11],ymm5[12],ymm3[13],ymm5[14],ymm3[15]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} ymm3 = ymm11[0,1,2,3,5,6,7,7,8,9,10,11,13,14,15,15]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm3 = ymm3[2,3,2,3,6,7,6,7]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm6 = ymm17[3,2,3,3,7,6,7,7]
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm11 = ymm3[0],ymm6[1],ymm3[2],ymm6[3,4],ymm3[5,6,7,8],ymm6[9],ymm3[10],ymm6[11,12],ymm3[13,14,15]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm3 = ymm4[0,1,2,1,4,5,6,5]
-; AVX512DQ-NEXT:    vprolq $16, %ymm0, %ymm6
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm15 = ymm6[0,1],ymm3[2],ymm6[3],ymm3[4],ymm6[5,6],ymm3[7],ymm6[8,9],ymm3[10],ymm6[11],ymm3[12],ymm6[13,14],ymm3[15]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm5 = ymm17[3,2,3,3,7,6,7,7]
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm11 = ymm3[0],ymm5[1],ymm3[2],ymm5[3,4],ymm3[5,6,7,8],ymm5[9],ymm3[10],ymm5[11,12],ymm3[13,14,15]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm3 = ymm6[0,1,2,1,4,5,6,5]
+; AVX512DQ-NEXT:    vprolq $16, %ymm0, %ymm5
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm15 = ymm5[0,1],ymm3[2],ymm5[3],ymm3[4],ymm5[5,6],ymm3[7],ymm5[8,9],ymm3[10],ymm5[11],ymm3[12],ymm5[13,14],ymm3[15]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,7,6,5,7,8,9,10,11,15,14,13,15]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,2,2,6,7,6,6]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm3 = ymm4[2,3,2,3,6,7,6,7]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm3 = ymm6[2,3,2,3,6,7,6,7]
 ; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4,5],ymm3[6],ymm0[7,8],ymm3[9],ymm0[10],ymm3[11],ymm0[12,13],ymm3[14],ymm0[15]
 ; AVX512DQ-NEXT:    vmovdqa64 %ymm0, %ymm21
 ; AVX512DQ-NEXT:    vpshufb %ymm7, %ymm1, %ymm0
@@ -7590,11 +7579,10 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm9
 ; AVX512DQ-NEXT:    vmovdqa64 {{.*#+}} zmm16 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
-; AVX512DQ-NEXT:    vpbroadcastq 16(%r8), %ymm0
-; AVX512DQ-NEXT:    vpandnq %ymm0, %ymm16, %ymm0
+; AVX512DQ-NEXT:    vpandnq 16(%r8){1to4}, %ymm16, %ymm0
 ; AVX512DQ-NEXT:    vmovdqa (%r8), %ymm10
 ; AVX512DQ-NEXT:    vpshufb %ymm14, %ymm10, %ymm1
-; AVX512DQ-NEXT:    vmovdqa64 %ymm14, %ymm29
+; AVX512DQ-NEXT:    vmovdqa64 %ymm14, %ymm23
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm17
 ; AVX512DQ-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
 ; AVX512DQ-NEXT:    vpshufb %ymm5, %ymm2, %ymm0
@@ -7610,7 +7598,7 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm1 = ymm3[3,0,3,0,7,4,7,4]
 ; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
-; AVX512DQ-NEXT:    vpshufb %ymm7, %ymm2, %ymm5
+; AVX512DQ-NEXT:    vpshufb %ymm7, %ymm2, %ymm4
 ; AVX512DQ-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
 ; AVX512DQ-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm19, %zmm31 # 64-byte Folded Reload
 ; AVX512DQ-NEXT:    vpbroadcastq 88(%r8), %ymm1
@@ -7631,8 +7619,8 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-NEXT:    # ymm18 = mem[0,1,0,1]
 ; AVX512DQ-NEXT:    vpermq $4, {{[-0-9]+}}(%r{{[sb]}}p), %ymm25 # 32-byte Folded Reload
 ; AVX512DQ-NEXT:    # ymm25 = mem[0,1,0,0]
-; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm27 = ymm28[0,1,0,1]
-; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm28 = ymm23[0,1,0,0]
+; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm27 = ymm29[0,1,0,1]
+; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm28 = ymm28[0,1,0,0]
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm10 = ymm10[0,1,1,1]
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm14 = ymm22[2,3,2,3]
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm13 = ymm13[2,3,2,2]
@@ -7641,38 +7629,37 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm15 = ymm15[2,3,2,3]
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm7 = ymm21[2,3,2,2]
 ; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm6 = ymm6[2,3,2,3]
-; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm4 = ymm8[2,2,3,2]
+; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm5 = ymm8[2,2,3,2]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm8 = ymm3[1,1,1,2,5,5,5,6]
-; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm5 = ymm5[0],ymm8[1],ymm5[2,3],ymm8[4],ymm5[5],ymm8[6],ymm5[7,8],ymm8[9],ymm5[10,11],ymm8[12],ymm5[13],ymm8[14],ymm5[15]
-; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm5 = ymm5[2,3,2,3]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
-; AVX512DQ-NEXT:    vmovdqa 64(%r8), %ymm5
-; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm8 = ymm5[0,1,1,1]
-; AVX512DQ-NEXT:    vmovdqa64 %ymm29, %ymm3
-; AVX512DQ-NEXT:    vpshufb %ymm3, %ymm5, %ymm5
-; AVX512DQ-NEXT:    vpbroadcastq 80(%r8), %ymm29
-; AVX512DQ-NEXT:    vpandnq %ymm29, %ymm16, %ymm29
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm29, %zmm5, %zmm5
-; AVX512DQ-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512DQ-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm8[1],ymm4[2,3],ymm8[4],ymm4[5],ymm8[6],ymm4[7,8],ymm8[9],ymm4[10,11],ymm8[12],ymm4[13],ymm8[14],ymm4[15]
+; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512DQ-NEXT:    vmovdqa 64(%r8), %ymm4
+; AVX512DQ-NEXT:    vpermq {{.*#+}} ymm8 = ymm4[0,1,1,1]
+; AVX512DQ-NEXT:    vmovdqa64 %ymm23, %ymm3
+; AVX512DQ-NEXT:    vpshufb %ymm3, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpandnq 80(%r8){1to4}, %ymm16, %ymm29
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm29, %zmm4, %zmm4
+; AVX512DQ-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512DQ-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm23 # 64-byte Reload
-; AVX512DQ-NEXT:    vpternlogq $248, %zmm22, %zmm31, %zmm23
-; AVX512DQ-NEXT:    vpternlogq $248, %zmm22, %zmm24, %zmm26
+; AVX512DQ-NEXT:    vpternlogq $248, %zmm21, %zmm31, %zmm23
+; AVX512DQ-NEXT:    vpternlogq $248, %zmm21, %zmm24, %zmm26
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm25, %zmm18, %zmm18
-; AVX512DQ-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Folded Reload
-; AVX512DQ-NEXT:    # zmm22 = mem[0,1,0,1,4,5,4,5]
+; AVX512DQ-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Folded Reload
+; AVX512DQ-NEXT:    # zmm21 = mem[0,1,0,1,4,5,4,5]
 ; AVX512DQ-NEXT:    vmovdqa64 {{.*#+}} zmm24 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512DQ-NEXT:    vpternlogq $226, %zmm18, %zmm24, %zmm22
+; AVX512DQ-NEXT:    vpternlogq $226, %zmm18, %zmm24, %zmm21
 ; AVX512DQ-NEXT:    vpbroadcastq 64(%r8), %ymm18
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm8, %zmm18, %zmm8
 ; AVX512DQ-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
-; AVX512DQ-NEXT:    vpternlogd $184, %zmm22, %zmm18, %zmm8
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm28, %zmm27, %zmm22
-; AVX512DQ-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm21 # 64-byte Folded Reload
-; AVX512DQ-NEXT:    # zmm21 = mem[0,1,0,1,4,5,4,5]
-; AVX512DQ-NEXT:    vpternlogq $226, %zmm22, %zmm24, %zmm21
-; AVX512DQ-NEXT:    vpbroadcastq (%r8), %ymm22
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm10, %zmm22, %zmm10
-; AVX512DQ-NEXT:    vpternlogd $184, %zmm21, %zmm18, %zmm10
+; AVX512DQ-NEXT:    vpternlogd $184, %zmm21, %zmm18, %zmm8
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm28, %zmm27, %zmm21
+; AVX512DQ-NEXT:    vpermq $68, {{[-0-9]+}}(%r{{[sb]}}p), %zmm22 # 64-byte Folded Reload
+; AVX512DQ-NEXT:    # zmm22 = mem[0,1,0,1,4,5,4,5]
+; AVX512DQ-NEXT:    vpternlogq $226, %zmm21, %zmm24, %zmm22
+; AVX512DQ-NEXT:    vpbroadcastq (%r8), %ymm21
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm10, %zmm21, %zmm10
+; AVX512DQ-NEXT:    vpternlogd $184, %zmm22, %zmm18, %zmm10
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm13, %zmm14, %zmm13
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm11, %zmm12, %zmm11
 ; AVX512DQ-NEXT:    vpternlogq $226, %zmm13, %zmm24, %zmm11
@@ -7681,18 +7668,18 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm12
 ; AVX512DQ-NEXT:    vpternlogd $184, %zmm11, %zmm16, %zmm12
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm7, %zmm15, %zmm7
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm4, %zmm6, %zmm4
-; AVX512DQ-NEXT:    vpternlogq $226, %zmm7, %zmm24, %zmm4
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm5, %zmm6, %zmm5
+; AVX512DQ-NEXT:    vpternlogq $226, %zmm7, %zmm24, %zmm5
 ; AVX512DQ-NEXT:    vpbroadcastq 48(%r8), %ymm6
 ; AVX512DQ-NEXT:    vpbroadcastq 56(%r8), %ymm7
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
-; AVX512DQ-NEXT:    vpternlogd $184, %zmm4, %zmm16, %zmm6
+; AVX512DQ-NEXT:    vpternlogd $184, %zmm5, %zmm16, %zmm6
 ; AVX512DQ-NEXT:    vpternlogq $226, %zmm30, %zmm19, %zmm9
 ; AVX512DQ-NEXT:    vpternlogq $226, %zmm20, %zmm19, %zmm0
-; AVX512DQ-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512DQ-NEXT:    vpternlogq $248, %zmm4, %zmm9, %zmm17
-; AVX512DQ-NEXT:    vpternlogq $248, %zmm4, %zmm0, %zmm5
-; AVX512DQ-NEXT:    vmovdqa64 %zmm5, 384(%r9)
+; AVX512DQ-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
+; AVX512DQ-NEXT:    vpternlogq $248, %zmm5, %zmm9, %zmm17
+; AVX512DQ-NEXT:    vpternlogq $248, %zmm5, %zmm0, %zmm4
+; AVX512DQ-NEXT:    vmovdqa64 %zmm4, 384(%r9)
 ; AVX512DQ-NEXT:    vmovdqa64 %zmm17, 64(%r9)
 ; AVX512DQ-NEXT:    vmovdqa64 %zmm6, 256(%r9)
 ; AVX512DQ-NEXT:    vmovdqa64 %zmm12, 576(%r9)
@@ -7710,8 +7697,8 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FCP:       # %bb.0:
 ; AVX512DQ-FCP-NEXT:    subq $360, %rsp # imm = 0x168
 ; AVX512DQ-FCP-NEXT:    vmovdqa 96(%rcx), %ymm8
-; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm15 = [u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u]
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm15, %ymm8, %ymm0
+; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm13 = [u,u,12,13,u,u,0,1,u,u,u,u,14,15,u,u,u,u,28,29,u,u,16,17,u,u,u,u,30,31,u,u]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm13, %ymm8, %ymm0
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 96(%rdx), %ymm19
 ; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm19[3,0,3,0,7,4,7,4]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
@@ -7727,10 +7714,10 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FCP-NEXT:    vmovdqa 96(%rsi), %ymm3
-; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm12 = [u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u]
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm12, %ymm3, %ymm1
-; AVX512DQ-FCP-NEXT:    vmovdqa64 96(%rdi), %ymm24
-; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm24[1,1,2,2]
+; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm9 = [u,u,0,1,u,u,u,u,14,15,u,u,2,3,u,u,u,u,16,17,u,u,u,u,30,31,u,u,18,19,u,u]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm9, %ymm3, %ymm1
+; AVX512DQ-FCP-NEXT:    vmovdqa64 96(%rdi), %ymm23
+; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm23[1,1,2,2]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm1[1],ymm2[2,3],ymm1[4],ymm2[5],ymm1[6],ymm2[7,8],ymm1[9],ymm2[10,11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
 ; AVX512DQ-FCP-NEXT:    vmovdqa 96(%rsi), %xmm4
 ; AVX512DQ-FCP-NEXT:    vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -7742,17 +7729,17 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm4, %zmm2
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FCP-NEXT:    vmovdqa 96(%r8), %ymm2
-; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm13 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm13, %ymm2, %ymm4
+; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm15 = [128,128,128,128,12,13,128,128,128,128,128,128,128,128,14,15,128,128,128,128,128,128,128,128,16,17,128,128,128,128,128,128]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm15, %ymm2, %ymm4
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,1,1]
 ; AVX512DQ-FCP-NEXT:    vpandn %ymm2, %ymm5, %ymm2
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm5, %zmm9
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm5, %zmm7
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
 ; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rcx), %ymm2
 ; AVX512DQ-FCP-NEXT:    vmovdqu %ymm2, (%rsp) # 32-byte Spill
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm15, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm13, %ymm2, %ymm2
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 32(%rdx), %ymm29
 ; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm4 = ymm29[3,0,3,0,7,4,7,4]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4,5],ymm2[6],ymm4[7,8],ymm2[9],ymm4[10],ymm2[11],ymm4[12,13],ymm2[14],ymm4[15]
@@ -7774,7 +7761,7 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
 ; AVX512DQ-FCP-NEXT:    vpshufb %xmm5, %xmm2, %xmm2
-; AVX512DQ-FCP-NEXT:    vmovdqa %xmm5, %xmm7
+; AVX512DQ-FCP-NEXT:    vmovdqa %xmm5, %xmm10
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
 ; AVX512DQ-FCP-NEXT:    vmovdqa (%rsi), %xmm4
 ; AVX512DQ-FCP-NEXT:    vpshufb %xmm1, %xmm4, %xmm5
@@ -7782,229 +7769,228 @@ define void @store_i16_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm6[1],xmm5[2,3],xmm6[4],xmm5[5],xmm6[6],xmm5[7]
 ; AVX512DQ-FCP-NEXT:    vmovdqa (%rdi), %xmm6
 ; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm7, %xmm4, %xmm4
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm10, %xmm4, %xmm4
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rsi), %ymm14
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm12, %ymm14, %ymm5
-; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rdi), %ymm6
-; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm7 = ymm6[1,1,2,2]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm5 = ymm7[0],ymm5[1],ymm7[2,3],ymm5[4],ymm7[5],ymm5[6],ymm7[7,8],ymm5[9],ymm7[10,11],ymm5[12],ymm7[13],ymm5[14],ymm7[15]
-; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rsi), %xmm7
-; AVX512DQ-FCP-NEXT:    vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm1, %xmm7, %xmm1
-; AVX512DQ-FCP-NEXT:    vpbroadcastq 40(%rdi), %xmm7
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm7[1],xmm1[2,3],xmm7[4],xmm1[5],xmm7[6],xmm1[7]
+; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rsi), %ymm11
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm9, %ymm11, %ymm5
+; AVX512DQ-FCP-NEXT:    vmovdqa64 32(%rdi), %ymm30
+; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm6 = ymm30[1,1,2,2]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2,3],ymm5[4],ymm6[5],ymm5[6],ymm6[7,8],ymm5[9],ymm6[10,11],ymm5[12],ymm6[13],ymm5[14],ymm6[15]
+; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rsi), %xmm6
+; AVX512DQ-FCP-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm1, %xmm6, %xmm1
+; AVX512DQ-FCP-NEXT:    vpbroadcastq 40(%rdi), %xmm6
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm6[1],xmm1[2,3],xmm6[4],xmm1[5],xmm6[6],xmm1[7]
 ; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,1,0,1]
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm18
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm5, %zmm1, %zmm16
 ; AVX512DQ-FCP-NEXT:    vmovdqa 32(%r8), %ymm1
 ; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm5 = ymm1[0,1,1,1]
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm13, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT:    vpandn %ymm5, %ymm9, %ymm5
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm31
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm15, %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT:    vpandn %ymm5, %ymm7, %ymm5
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm5, %zmm17
 ; AVX512DQ-FCP-NEXT:    vmovdqa 64(%rcx), %xmm1
 ; AVX512DQ-FCP-NEXT:    vpshufb %xmm0, %xmm1, %xmm5
-; AVX512DQ-FCP-NEXT:    vmovdqa 64(%rdx), %xmm7
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} xmm9 = xmm7[1,2,2,2]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm9[1],xmm5[2],xmm9[3],xmm5[4,5],xmm9[6],xmm5[7]
-; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
-; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm7 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT:    vmovdqa %xmm7, %xmm9
-; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [0,1,0,1,8,9,8,8]
-; AVX512DQ-FCP-NEXT:    vpermt2q %zmm5, %zmm7, %zmm1
+; AVX512DQ-FCP-NEXT:    vmovdqa 64(%rdx), %xmm6
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} xmm7 = xmm6[1,2,2,2]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm5 = xmm5[0],xmm7[1],xmm5[2],xmm7[3],xmm5[4,5],xmm7[6],xmm5[7]
+; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm6 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm6, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT:    vmovdqa %xmm6, %xmm7
+; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm6 = [0,1,0,1,8,9,8,8]
+; AVX512DQ-FCP-NEXT:    vpermt2q %zmm5, %zmm6, %zmm1
 ; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} zmm20 = zmm2[0,1,0,1,4,5,4,5]
-; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm21 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm1, %zmm21, %zmm20
+; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm31 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535]
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm1, %zmm31, %zmm20
 ; AVX512DQ-FCP-NEXT:    vmovdqa (%rcx), %xmm1
 ; AVX512DQ-FCP-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
 ; AVX512DQ-FCP-NEXT:    vmovdqa (%rdx), %xmm2
 ; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[1,2,2,2]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm5[1],xmm0[2],xmm5[3],xmm0[4,5],xmm5[6],xmm0[7]
 ; AVX512DQ-FCP-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm9, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm7, %zmm1
-; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} zmm22 = zmm4[0,1,0,1,4,5,4,5]
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm1, %zmm21, %zmm22
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm7, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm6, %zmm1
+; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} zmm21 = zmm4[0,1,0,1,4,5,4,5]
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm1, %zmm31, %zmm21
 ; AVX512DQ-FCP-NEXT:    vmovdqa (%rsi), %ymm2
 ; AVX512DQ-FCP-NEXT:    vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rdi), %ymm30
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm12, %ymm2, %ymm0
-; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm30[1,1,2,2]
+; AVX512DQ-FCP-NEXT:    vmovdqa64 (%rdi), %ymm28
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm9, %ymm2, %ymm0
+; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm28[1,1,2,2]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm30[0,1,2,1,4,5,6,5]
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm28[0,1,2,1,4,5,6,5]
 ; AVX512DQ-FCP-NEXT:    vprolq $16, %ymm2, %ymm2
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3],ymm1[4],ymm2[5,6],ymm1[7],ymm2[8,9],ymm1[10],ymm2[11],ymm1[12],ymm2[13,14],ymm1[15]
 ; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
-; AVX512DQ-FCP-NEXT:    vmovdqa (%rdx), %ymm5
-; AVX512DQ-FCP-NEXT:    vmovdqa (%rcx), %ymm4
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm15, %ymm4, %ymm0
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm5[3,0,3,0,7,4,7,4]
+; AVX512DQ-FCP-NEXT:    vmovdqa (%rdx), %ymm10
+; AVX512DQ-FCP-NEXT:    vmovdqa (%rcx), %ymm6
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm13, %ymm6, %ymm0
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm10[3,0,3,0,7,4,7,4]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
 ; AVX512DQ-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm7 = [18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25,18,19,0,0,22,23,22,23,0,0,20,21,0,0,24,25]
 ; AVX512DQ-FCP-NEXT:    # ymm7 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm7, %ymm4, %ymm1
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm2 = ymm5[1,1,1,2,5,5,5,6]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm7, %ymm6, %ymm1
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm2 = ymm10[1,1,1,2,5,5,5,6]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5],ymm2[6],ymm1[7,8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13],ymm2[14],ymm1[15]
 ; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm26
-; AVX512DQ-FCP-NEXT:    vpbroadcastq 16(%r8), %ymm0
-; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm17 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
-; AVX512DQ-FCP-NEXT:    vpandnq %ymm0, %ymm17, %ymm0
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm25
+; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0]
+; AVX512DQ-FCP-NEXT:    vpandnq 16(%r8){1to4}, %ymm12, %ymm0
 ; AVX512DQ-FCP-NEXT:    vmovdqa (%r8), %ymm2
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm13, %ymm2, %ymm1
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm2, %ymm27
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm28
-; AVX512DQ-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm11 = [30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29]
-; AVX512DQ-FCP-NEXT:    # ymm11 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm11, %ymm3, %ymm0
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm24[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm15, %ymm2, %ymm1
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm2, %ymm26
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm27
+; AVX512DQ-FCP-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29,30,31,28,29,26,27,30,31,30,31,28,29,30,31,28,29]
+; AVX512DQ-FCP-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm4, %ymm3, %ymm0
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm23[2,3,2,3,6,7,6,7]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7,8],ymm1[9],ymm0[10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
 ; AVX512DQ-FCP-NEXT:    vprolq $16, %ymm3, %ymm1
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm2 = ymm24[0,1,2,1,4,5,6,5]
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm2 = ymm23[0,1,2,1,4,5,6,5]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8,9],ymm2[10],ymm1[11],ymm2[12],ymm1[13,14],ymm2[15]
-; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm23 = [2,3,2,3,10,11,10,10]
-; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm23, %zmm1
-; AVX512DQ-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm9 = [26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31]
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm9, %ymm8, %ymm0
+; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm22 = [2,3,2,3,10,11,10,10]
+; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm22, %zmm1
+; AVX512DQ-FCP-NEXT:    vpshufb {{.*#+}} ymm0 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31]
 ; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm2 = ymm19[3,2,3,3,7,6,7,7]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3,4],ymm0[5,6,7,8],ymm2[9],ymm0[10],ymm2[11,12],ymm0[13,14,15]
 ; AVX512DQ-FCP-NEXT:    vpshufb %ymm7, %ymm8, %ymm2
 ; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm3 = ymm19[1,1,1,2,5,5,5,6]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3],ymm3[4],ymm2[5],ymm3[6],ymm2[7,8],ymm3[9],ymm2[10,11],ymm3[12],ymm2[13],ymm3[14],ymm2[15]
-; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm19 = [2,3,2,3,10,10,11,10]
-; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm19, %zmm2
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm1, %zmm21, %zmm2
+; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm18 = [2,3,2,3,10,10,11,10]
+; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm18, %zmm2
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm1, %zmm31, %zmm2
 ; AVX512DQ-FCP-NEXT:    vpbroadcastq 112(%r8), %ymm0
 ; AVX512DQ-FCP-NEXT:    vpbroadcastq 120(%r8), %ymm1
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm16
-; AVX512DQ-FCP-NEXT:    vpternlogd $184, %zmm2, %zmm17, %zmm16
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm14
+; AVX512DQ-FCP-NEXT:    vpternlogd $184, %zmm2, %zmm12, %zmm14
 ; AVX512DQ-FCP-NEXT:    vmovdqa 64(%rsi), %ymm8
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm12, %ymm8, %ymm0
-; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%rdi), %ymm24
-; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm24[1,1,2,2]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm9, %ymm8, %ymm0
+; AVX512DQ-FCP-NEXT:    vmovdqa64 64(%rdi), %ymm23
+; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm23[1,1,2,2]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5],ymm0[6],ymm1[7,8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm24[0,1,2,1,4,5,6,5]
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm1 = ymm23[0,1,2,1,4,5,6,5]
 ; AVX512DQ-FCP-NEXT:    vprolq $16, %ymm8, %ymm2
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3],ymm1[4],ymm2[5,6],ymm1[7],ymm2[8,9],ymm1[10],ymm2[11],ymm1[12],ymm2[13,14],ymm1[15]
 ; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[2,3,2,3]
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm25
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm19
 ; AVX512DQ-FCP-NEXT:    vmovdqa 64(%rcx), %ymm1
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm15, %ymm1, %ymm2
-; AVX512DQ-FCP-NEXT:    vmovdqa 64(%rdx), %ymm0
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm12 = ymm0[3,0,3,0,7,4,7,4]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm12[0],ymm2[1],ymm12[2],ymm2[3],ymm12[4,5],ymm2[6],ymm12[7,8],ymm2[9],ymm12[10],ymm2[11],ymm12[12,13],ymm2[14],ymm12[15]
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm7, %ymm1, %ymm12
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm15 = ymm0[1,1,1,2,5,5,5,6]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm12 = ymm12[0],ymm15[1],ymm12[2,3],ymm15[4],ymm12[5],ymm15[6],ymm12[7,8],ymm15[9],ymm12[10,11],ymm15[12],ymm12[13],ymm15[14],ymm12[15]
-; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm12 = ymm12[2,3,2,3]
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm12, %zmm2, %zmm12
-; AVX512DQ-FCP-NEXT:    vpbroadcastq 80(%r8), %ymm2
-; AVX512DQ-FCP-NEXT:    vpandnq %ymm2, %ymm17, %ymm15
-; AVX512DQ-FCP-NEXT:    vmovdqa 64(%r8), %ymm3
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm13, %ymm3, %ymm13
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm15, %zmm13, %zmm15
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm11, %ymm14, %ymm13
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm10 = ymm6[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm10 = ymm13[0],ymm10[1],ymm13[2],ymm10[3],ymm13[4,5],ymm10[6],ymm13[7,8],ymm10[9],ymm13[10],ymm10[11],ymm13[12,13],ymm10[14],ymm13[15]
-; AVX512DQ-FCP-NEXT:    vprolq $16, %ymm14, %ymm13
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm6 = ymm6[0,1,2,1,4,5,6,5]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm6 = ymm13[0,1],ymm6[2],ymm13[3],ymm6[4],ymm13[5,6],ymm6[7],ymm13[8,9],ymm6[10],ymm13[11],ymm6[12],ymm13[13,14],ymm6[15]
-; AVX512DQ-FCP-NEXT:    vpermt2q %zmm10, %zmm23, %zmm6
-; AVX512DQ-FCP-NEXT:    vmovdqu (%rsp), %ymm14 # 32-byte Reload
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm9, %ymm14, %ymm10
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm13 = ymm29[3,2,3,3,7,6,7,7]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm10 = ymm10[0],ymm13[1],ymm10[2],ymm13[3,4],ymm10[5,6,7,8],ymm13[9],ymm10[10],ymm13[11,12],ymm10[13,14,15]
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm7, %ymm14, %ymm7
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm13 = ymm29[1,1,1,2,5,5,5,6]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm7 = ymm7[0],ymm13[1],ymm7[2,3],ymm13[4],ymm7[5],ymm13[6],ymm7[7,8],ymm13[9],ymm7[10,11],ymm13[12],ymm7[13],ymm13[14],ymm7[15]
-; AVX512DQ-FCP-NEXT:    vpermt2q %zmm10, %zmm19, %zmm7
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm6, %zmm21, %zmm7
-; AVX512DQ-FCP-NEXT:    vpbroadcastq 48(%r8), %ymm6
-; AVX512DQ-FCP-NEXT:    vpbroadcastq 56(%r8), %ymm10
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm10, %zmm6, %zmm6
-; AVX512DQ-FCP-NEXT:    vpternlogd $184, %zmm7, %zmm17, %zmm6
-; AVX512DQ-FCP-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; AVX512DQ-FCP-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
-; AVX512DQ-FCP-NEXT:    # xmm7 = xmm7[0],mem[0],xmm7[1],mem[1],xmm7[2],mem[2],xmm7[3],mem[3]
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm9, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[3,2,3,3,7,6,7,7]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm13, %ymm1, %ymm2
+; AVX512DQ-FCP-NEXT:    vmovdqa 64(%rdx), %ymm3
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm9 = ymm3[3,0,3,0,7,4,7,4]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm2 = ymm9[0],ymm2[1],ymm9[2],ymm2[3],ymm9[4,5],ymm2[6],ymm9[7,8],ymm2[9],ymm9[10],ymm2[11],ymm9[12,13],ymm2[14],ymm9[15]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm7, %ymm1, %ymm9
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm13 = ymm3[1,1,1,2,5,5,5,6]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm9 = ymm9[0],ymm13[1],ymm9[2,3],ymm13[4],ymm9[5],ymm13[6],ymm9[7,8],ymm13[9],ymm9[10,11],ymm13[12],ymm9[13],ymm13[14],ymm9[15]
+; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm9 = ymm9[2,3,2,3]
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm9, %zmm2, %zmm9
+; AVX512DQ-FCP-NEXT:    vmovdqa 64(%r8), %ymm5
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm15, %ymm5, %ymm13
+; AVX512DQ-FCP-NEXT:    vpandnq 80(%r8){1to4}, %ymm12, %ymm24
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm24, %zmm13, %zmm24
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm4, %ymm11, %ymm13
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm15 = ymm30[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm15[1],ymm13[2],ymm15[3],ymm13[4,5],ymm15[6],ymm13[7,8],ymm15[9],ymm13[10],ymm15[11],ymm13[12,13],ymm15[14],ymm13[15]
+; AVX512DQ-FCP-NEXT:    vprolq $16, %ymm11, %ymm11
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm15 = ymm30[0,1,2,1,4,5,6,5]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm11 = ymm11[0,1],ymm15[2],ymm11[3],ymm15[4],ymm11[5,6],ymm15[7],ymm11[8,9],ymm15[10],ymm11[11],ymm15[12],ymm11[13,14],ymm15[15]
+; AVX512DQ-FCP-NEXT:    vpermt2q %zmm13, %zmm22, %zmm11
+; AVX512DQ-FCP-NEXT:    vmovdqu (%rsp), %ymm0 # 32-byte Reload
+; AVX512DQ-FCP-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31,26,27,28,29,30,31,30,31]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm2, %ymm0, %ymm13
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm15 = ymm29[3,2,3,3,7,6,7,7]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm13 = ymm13[0],ymm15[1],ymm13[2],ymm15[3,4],ymm13[5,6,7,8],ymm15[9],ymm13[10],ymm15[11,12],ymm13[13,14,15]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm7, %ymm0, %ymm7
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm15 = ymm29[1,1,1,2,5,5,5,6]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm15 = ymm7[0],ymm15[1],ymm7[2,3],ymm15[4],ymm7[5],ymm15[6],ymm7[7,8],ymm15[9],ymm7[10,11],ymm15[12],ymm7[13],ymm15[14],ymm7[15]
+; AVX512DQ-FCP-NEXT:    vpermt2q %zmm13, %zmm18, %zmm15
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm11, %zmm31, %zmm15
+; AVX512DQ-FCP-NEXT:    vpbroadcastq 48(%r8), %ymm7
+; AVX512DQ-FCP-NEXT:    vpbroadcastq 56(%r8), %ymm11
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm11, %zmm7, %zmm7
+; AVX512DQ-FCP-NEXT:    vpternlogd $184, %zmm15, %zmm12, %zmm7
+; AVX512DQ-FCP-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; AVX512DQ-FCP-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm11, %xmm11 # 16-byte Folded Reload
+; AVX512DQ-FCP-NEXT:    # xmm11 = xmm11[0],mem[0],xmm11[1],mem[1],xmm11[2],mem[2],xmm11[3],mem[3]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm0 = ymm3[3,2,3,3,7,6,7,7]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3,4],ymm1[5,6,7,8],ymm0[9],ymm1[10],ymm0[11,12],ymm1[13,14,15]
-; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm14 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm14, %xmm7, %xmm1
-; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm7 = [2,2,3,2,8,9,8,9]
-; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm7, %zmm0
+; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm13 = [4,5,2,3,2,3,0,1,10,11,8,9,4,5,6,7]
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm13, %xmm11, %xmm1
+; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm11 = [2,2,3,2,8,9,8,9]
+; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm11, %zmm0
 ; AVX512DQ-FCP-NEXT:    vmovdqa 96(%rdi), %xmm1
 ; AVX512DQ-FCP-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
 ; AVX512DQ-FCP-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm11, %ymm8, %ymm8
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm10 = ymm24[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm10[1],ymm8[2],ymm10[3],ymm8[4,5],ymm10[6],ymm8[7,8],ymm10[9],ymm8[10],ymm10[11],ymm8[12,13],ymm10[14],ymm8[15]
-; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm13 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm13, %xmm1, %xmm1
-; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm10 = [2,3,2,2,8,9,8,9]
-; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm10, %zmm8
+; AVX512DQ-FCP-NEXT:    vmovdqa %ymm4, %ymm15
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm4, %ymm8, %ymm8
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm12 = ymm23[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm12[1],ymm8[2],ymm12[3],ymm8[4,5],ymm12[6],ymm8[7,8],ymm12[9],ymm8[10],ymm12[11],ymm8[12,13],ymm12[14],ymm8[15]
+; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,1,2,3,8,9,10,11,4,5,4,5,6,7,12,13]
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm12 = [2,3,2,2,8,9,8,9]
+; AVX512DQ-FCP-NEXT:    vpermt2q %zmm1, %zmm12, %zmm8
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
 ; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm0, %zmm1, %zmm8
 ; AVX512DQ-FCP-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX512DQ-FCP-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
 ; AVX512DQ-FCP-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm14, %xmm0, %xmm0
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm9, %ymm4, %ymm4
-; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm3[0,1,1,1]
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm5 = ymm5[3,2,3,3,7,6,7,7]
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm13, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm2, %ymm6, %ymm4
+; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm2 = ymm5[0,1,1,1]
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm5 = ymm10[3,2,3,3,7,6,7,7]
 ; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2],ymm5[3,4],ymm4[5,6,7,8],ymm5[9],ymm4[10],ymm5[11,12],ymm4[13,14,15]
-; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm7, %zmm4
+; AVX512DQ-FCP-NEXT:    vpermt2q %zmm0, %zmm11, %zmm4
 ; AVX512DQ-FCP-NEXT:    vpbroadcastq 88(%r8), %ymm0
 ; AVX512DQ-FCP-NEXT:    vpbroadcastq 96(%r8), %ymm5
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm5, %zmm0, %zmm0
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535]
 ; AVX512DQ-FCP-NEXT:    vpternlogd $184, %zmm8, %zmm5, %zmm0
-; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rdi), %xmm7
-; AVX512DQ-FCP-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm7, %xmm7 # 16-byte Folded Reload
-; AVX512DQ-FCP-NEXT:    # xmm7 = xmm7[0],mem[0],xmm7[1],mem[1],xmm7[2],mem[2],xmm7[3],mem[3]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm13, %xmm7, %xmm7
+; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rdi), %xmm8
+; AVX512DQ-FCP-NEXT:    vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm8, %xmm8 # 16-byte Folded Reload
+; AVX512DQ-FCP-NEXT:    # xmm8 = xmm8[0],mem[0],xmm8[1],mem[1],xmm8[2],mem[2],xmm8[3],mem[3]
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm3, %xmm8, %xmm8
 ; AVX512DQ-FCP-NEXT:    vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm11, %ymm3, %ymm8
-; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm9 = ymm27[0,1,1,1]
-; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm11 = ymm30[2,3,2,3,6,7,6,7]
-; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm8 = ymm8[0],ymm11[1],ymm8[2],ymm11[3],ymm8[4,5],ymm11[6],ymm8[7,8],ymm11[9],ymm8[10],ymm11[11],ymm8[12,13],ymm11[14],ymm8[15]
-; AVX512DQ-FCP-NEXT:    vpermt2q %zmm7, %zmm10, %zmm8
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm4, %zmm1, %zmm8
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm15, %ymm3, %ymm6
+; AVX512DQ-FCP-NEXT:    vpermq {{.*#+}} ymm10 = ymm26[0,1,1,1]
+; AVX512DQ-FCP-NEXT:    vpshufd {{.*#+}} ymm11 = ymm28[2,3,2,3,6,7,6,7]
+; AVX512DQ-FCP-NEXT:    vpblendw {{.*#+}} ymm6 = ymm6[0],ymm11[1],ymm6[2],ymm11[3],ymm6[4,5],ymm11[6],ymm6[7,8],ymm11[9],ymm6[10],ymm11[11],ymm6[12,13],ymm11[14],ymm6[15]
+; AVX512DQ-FCP-NEXT:    vpermt2q %zmm8, %zmm12, %zmm6
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm4, %zmm1, %zmm6
 ; AVX512DQ-FCP-NEXT:    vpbroadcastq 24(%r8), %ymm4
-; AVX512DQ-FCP-NEXT:    vpbroadcastq 32(%r8), %ymm7
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm4
-; AVX512DQ-FCP-NEXT:    vpternlogd $184, %zmm8, %zmm5, %zmm4
+; AVX512DQ-FCP-NEXT:    vpbroadcastq 32(%r8), %ymm8
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm8, %zmm4, %zmm4
+; AVX512DQ-FCP-NEXT:    vpternlogd $184, %zmm6, %zmm5, %zmm4
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0]
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm3 # 64-byte Reload
 ; AVX512DQ-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm3 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm18 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm5, %zmm16 # 64-byte Folded Reload
 ; AVX512DQ-FCP-NEXT:    vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm8 # 64-byte Reload
+; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535]
 ; AVX512DQ-FCP-NEXT:    vpternlogq $248, %zmm5, %zmm3, %zmm8
-; AVX512DQ-FCP-NEXT:    vpternlogq $248, %zmm5, %zmm18, %zmm31
+; AVX512DQ-FCP-NEXT:    vpternlogq $248, %zmm5, %zmm16, %zmm17
 ; AVX512DQ-FCP-NEXT:    vpbroadcastq 64(%r8), %ymm5
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm5, %zmm2
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm5 = [65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535]
 ; AVX512DQ-FCP-NEXT:    vpternlogd $184, %zmm20, %zmm5, %zmm2
-; AVX512DQ-FCP-NEXT:    vpbroadcastq (%r8), %ymm7
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
-; AVX512DQ-FCP-NEXT:    vpternlogd $184, %zmm22, %zmm5, %zmm7
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm26 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm25, %zmm1, %zmm12
+; AVX512DQ-FCP-NEXT:    vpbroadcastq (%r8), %ymm6
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm10, %zmm6, %zmm6
+; AVX512DQ-FCP-NEXT:    vpternlogd $184, %zmm21, %zmm5, %zmm6
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm25 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm19, %zmm1, %zmm9
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 {{.*#+}} zmm1 = [65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535,0,65535,65535,65535,65535]
-; AVX512DQ-FCP-NEXT:    vpternlogq $248, %zmm1, %zmm26, %zmm28
-; AVX512DQ-FCP-NEXT:    vpternlogq $248, %zmm1, %zmm12, %zmm15
+; AVX512DQ-FCP-NEXT:    vpternlogq $248, %zmm1, %zmm25, %zmm27
+; AVX512DQ-FCP-NEXT:    vpternlogq $248, %zmm1, %zmm9, %zmm24
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm4, 128(%r9)
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm6, 256(%r9)
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm7, 256(%r9)
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, 448(%r9)
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm15, 384(%r9)
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm16, 576(%r9)
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm28, 64(%r9)
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm7, (%r9)
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm24, 384(%r9)
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm14, 576(%r9)
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm27, 64(%r9)
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm6, (%r9)
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, 320(%r9)
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm31, 192(%r9)
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm17, 192(%r9)
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm8, 512(%r9)
 ; AVX512DQ-FCP-NEXT:    addq $360, %rsp # imm = 0x168
 ; AVX512DQ-FCP-NEXT:    vzeroupper
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
index b11d056959437..215a3a2ae5d05 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-8.ll
@@ -7091,7 +7091,7 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ;
 ; AVX512-LABEL: store_i8_stride8_vf64:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    subq $648, %rsp # imm = 0x288
+; AVX512-NEXT:    subq $680, %rsp # imm = 0x2A8
 ; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512-NEXT:    vmovdqa (%rcx), %xmm1
@@ -7118,8 +7118,8 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    vmovdqa 48(%rax), %xmm5
 ; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512-NEXT:    vinserti128 $1, %xmm8, %ymm1, %ymm1
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm0[0,2,2,3,4,5,6,7]
+; AVX512-NEXT:    vinserti128 $1, %xmm7, %ymm1, %ymm1
 ; AVX512-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
@@ -7127,10 +7127,10 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT:    vmovdqa (%r9), %xmm1
 ; AVX512-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-NEXT:    vmovdqa 48(%r9), %xmm12
+; AVX512-NEXT:    vmovdqa 48(%r9), %xmm7
 ; AVX512-NEXT:    vmovdqa (%r8), %xmm0
 ; AVX512-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512-NEXT:    vmovdqa 48(%r8), %xmm13
+; AVX512-NEXT:    vmovdqa 48(%r8), %xmm12
 ; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,1,1,3,4,5,6,7]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm0[2,1,3,3,4,5,6,7]
@@ -7138,7 +7138,8 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,5,7]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,7,7]
-; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm8
+; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm0[0,2,2,3,4,5,6,7]
@@ -7146,8 +7147,7 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm9
 ; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm0[0,2,2,3,4,5,6,7]
@@ -7157,69 +7157,70 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
 ; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX512-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm12[0],xmm7[0],xmm12[1],xmm7[1],xmm12[2],xmm7[2],xmm12[3],xmm7[3],xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,1,1,3,4,5,6,7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm0[2,1,3,3,4,5,6,7]
-; AVX512-NEXT:    vinserti128 $1, %xmm14, %ymm1, %ymm1
-; AVX512-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm0[2,1,3,3,4,5,6,7]
+; AVX512-NEXT:    vinserti128 $1, %xmm13, %ymm1, %ymm1
+; AVX512-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT:    vmovdqa 32(%rdx), %xmm1
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm0[0,1,2,3,4,5,5,7]
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm0[0,1,2,3,4,5,5,7]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,7,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm0, %ymm14, %ymm28
+; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm13, %ymm0
+; AVX512-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
 ; AVX512-NEXT:    vmovdqa 32(%r10), %xmm0
 ; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[0,0,2,1,4,5,6,7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm3[0,2,2,3,4,5,6,7]
-; AVX512-NEXT:    vinserti128 $1, %xmm14, %ymm2, %ymm11
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm3[0,2,2,3,4,5,6,7]
+; AVX512-NEXT:    vinserti128 $1, %xmm13, %ymm2, %ymm11
 ; AVX512-NEXT:    vmovdqa 32(%rax), %xmm2
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm3[0,1,2,3,4,4,6,5]
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm3[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm3, %ymm14, %ymm31
+; AVX512-NEXT:    vinserti32x4 $1, %xmm3, %ymm13, %ymm31
 ; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm3[0,0,2,1,4,5,6,7]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[0,2,2,3,4,5,6,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm5, %ymm4, %ymm24
+; AVX512-NEXT:    vinserti32x4 $1, %xmm5, %ymm4, %ymm28
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm3, %ymm4, %ymm22
-; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
+; AVX512-NEXT:    vinserti32x4 $1, %xmm3, %ymm4, %ymm23
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm12[8],xmm7[8],xmm12[9],xmm7[9],xmm12[10],xmm7[10],xmm12[11],xmm7[11],xmm12[12],xmm7[12],xmm12[13],xmm7[13],xmm12[14],xmm7[14],xmm12[15],xmm7[15]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm3[0,1,1,3,4,5,6,7]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[2,1,3,3,4,5,6,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm5, %ymm4, %ymm20
+; AVX512-NEXT:    vinserti32x4 $1, %xmm5, %ymm4, %ymm21
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,4,5,5,7]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,7,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm3, %ymm4, %ymm18
+; AVX512-NEXT:    vinserti32x4 $1, %xmm3, %ymm4, %ymm20
 ; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3],xmm1[4],xmm10[4],xmm1[5],xmm10[5],xmm1[6],xmm10[6],xmm1[7],xmm10[7]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[0,0,2,1,4,5,6,7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm3[0,2,2,3,4,5,6,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm12, %ymm5, %ymm30
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm3[0,2,2,3,4,5,6,7]
+; AVX512-NEXT:    vinserti32x4 $1, %xmm7, %ymm5, %ymm30
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
 ; AVX512-NEXT:    vinserti32x4 $1, %xmm3, %ymm5, %ymm29
 ; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[0,0,2,1,4,5,6,7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm3[0,2,2,3,4,5,6,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm12, %ymm5, %ymm23
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm3[0,2,2,3,4,5,6,7]
+; AVX512-NEXT:    vinserti32x4 $1, %xmm7, %ymm5, %ymm24
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm3, %ymm5, %ymm21
+; AVX512-NEXT:    vinserti32x4 $1, %xmm3, %ymm5, %ymm22
 ; AVX512-NEXT:    vmovdqa 32(%r9), %xmm3
 ; AVX512-NEXT:    vmovdqa 32(%r8), %xmm5
-; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3],xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm12[0,1,1,3,4,5,6,7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm12[2,1,3,3,4,5,6,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm14, %ymm13, %ymm19
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm12[0,1,2,3,4,5,5,7]
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,6,5,7,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm12, %ymm13, %ymm17
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3],xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm7[0,1,1,3,4,5,6,7]
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm7[2,1,3,3,4,5,6,7]
+; AVX512-NEXT:    vinserti32x4 $1, %xmm13, %ymm12, %ymm19
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm7[0,1,2,3,4,5,5,7]
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,7,7]
+; AVX512-NEXT:    vinserti32x4 $1, %xmm7, %ymm12, %ymm18
 ; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm10[8],xmm1[9],xmm10[9],xmm1[10],xmm10[10],xmm1[11],xmm10[11],xmm1[12],xmm10[12],xmm1[13],xmm10[13],xmm1[14],xmm10[14],xmm1[15],xmm10[15]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm1[0,0,2,1,4,5,6,7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm1[0,2,2,3,4,5,6,7]
-; AVX512-NEXT:    vinserti128 $1, %xmm12, %ymm10, %ymm4
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm1[0,0,2,1,4,5,6,7]
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm1[0,2,2,3,4,5,6,7]
+; AVX512-NEXT:    vinserti128 $1, %xmm10, %ymm7, %ymm4
 ; AVX512-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm1[0,1,2,3,4,4,6,5]
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm10, %ymm1
+; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm7, %ymm1
 ; AVX512-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
@@ -7239,9 +7240,9 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,7,7]
 ; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX512-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT:    vmovdqa 16(%rcx), %xmm13
+; AVX512-NEXT:    vmovdqa 16(%rcx), %xmm14
 ; AVX512-NEXT:    vmovdqa 16(%rdx), %xmm12
-; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3],xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm12[0],xmm14[0],xmm12[1],xmm14[1],xmm12[2],xmm14[2],xmm12[3],xmm14[3],xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX512-NEXT:    vinserti128 $1, %xmm5, %ymm1, %ymm1
@@ -7254,7 +7255,7 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    vmovdqa 16(%rax), %xmm2
 ; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
 ; AVX512-NEXT:    vmovdqa64 %xmm2, %xmm26
-; AVX512-NEXT:    vmovdqa64 %xmm1, %xmm16
+; AVX512-NEXT:    vmovdqa64 %xmm1, %xmm17
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX512-NEXT:    vinserti128 $1, %xmm5, %ymm1, %ymm1
@@ -7263,19 +7264,20 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
 ; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX512-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT:    vmovdqa 16(%r9), %xmm15
-; AVX512-NEXT:    vmovdqa 16(%r8), %xmm14
-; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3],xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm7[0,1,1,3,4,5,6,7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[2,1,3,3,4,5,6,7]
-; AVX512-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm0
+; AVX512-NEXT:    vmovdqa 16(%r9), %xmm0
+; AVX512-NEXT:    vmovdqa 16(%r8), %xmm15
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; AVX512-NEXT:    vmovdqa64 %xmm0, %xmm16
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[0,1,1,3,4,5,6,7]
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm6[2,1,3,3,4,5,6,7]
+; AVX512-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512-NEXT:    vmovdqa (%rsi), %xmm1
 ; AVX512-NEXT:    vmovdqa (%rdi), %xmm2
 ; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; AVX512-NEXT:    vmovdqa64 %xmm2, %xmm25
 ; AVX512-NEXT:    vmovdqa64 %xmm1, %xmm27
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[2,3,2,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm13 = xmm0[2,3,2,3]
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
@@ -7284,60 +7286,61 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm9 = xmm0[3,3,3,3]
-; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[3,3,3,3]
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm8 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm13[0],zero,zero,zero,xmm13[1],zero,zero,zero
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
-; AVX512-NEXT:    vinserti128 $1, %xmm3, %ymm6, %ymm3
+; AVX512-NEXT:    vinserti128 $1, %xmm3, %ymm10, %ymm3
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
 ; AVX512-NEXT:    vinserti128 $1, %xmm4, %ymm5, %ymm4
-; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm4
-; AVX512-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512-NEXT:    # ymm3 = mem[0,1,1,3,4,5,5,7]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
+; AVX512-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
+; AVX512-NEXT:    # ymm4 = mem[0,1,1,3,4,5,5,7]
 ; AVX512-NEXT:    vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX512-NEXT:    # ymm5 = mem[2,1,3,3,6,5,7,7]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm6
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535]
-; AVX512-NEXT:    vpternlogq $184, %zmm4, %zmm3, %zmm6
-; AVX512-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512-NEXT:    # ymm4 = mem[0,0,2,1,4,4,6,5]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm10
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} zmm13 = [65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535]
+; AVX512-NEXT:    vpternlogq $184, %zmm3, %zmm13, %zmm10
+; AVX512-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX512-NEXT:    # ymm3 = mem[0,0,2,1,4,4,6,5]
+; AVX512-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
+; AVX512-NEXT:    # ymm4 = mem[0,2,2,3,4,6,6,7]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm4
+; AVX512-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX512-NEXT:    # ymm3 = mem[0,0,2,1,4,4,6,5]
 ; AVX512-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX512-NEXT:    # ymm5 = mem[0,2,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512-NEXT:    # ymm5 = mem[0,0,2,1,4,4,6,5]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[0,2,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm5
-; AVX512-NEXT:    vpbroadcastq {{.*#+}} zmm8 = [65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0]
-; AVX512-NEXT:    vpandnq %zmm4, %zmm8, %zmm4
-; AVX512-NEXT:    vpandq %zmm8, %zmm5, %zmm5
+; AVX512-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm5
+; AVX512-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0]
+; AVX512-NEXT:    vpandnq %zmm4, %zmm3, %zmm4
+; AVX512-NEXT:    vpandq %zmm3, %zmm5, %zmm5
 ; AVX512-NEXT:    movw $-21846, %ax # imm = 0xAAAA
 ; AVX512-NEXT:    kmovw %eax, %k1
-; AVX512-NEXT:    vpord %zmm4, %zmm5, %zmm6 {%k1}
+; AVX512-NEXT:    vpord %zmm4, %zmm5, %zmm10 {%k1}
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
-; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
 ; AVX512-NEXT:    vinserti128 $1, %xmm4, %ymm2, %ymm2
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm10, %ymm0
+; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm8, %ymm0
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX512-NEXT:    # ymm2 = mem[0,1,1,3,4,5,5,7]
-; AVX512-NEXT:    vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512-NEXT:    # ymm4 = mem[2,1,3,3,6,5,7,7]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm4 = ymm9[2,1,3,3,6,5,7,7]
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm9
-; AVX512-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm9
+; AVX512-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm9
 ; AVX512-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX512-NEXT:    # ymm0 = mem[0,0,2,1,4,4,6,5]
 ; AVX512-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX512-NEXT:    # ymm2 = mem[0,2,2,3,4,6,6,7]
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512-NEXT:    vpshufd $96, (%rsp), %ymm2 # 32-byte Folded Reload
+; AVX512-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX512-NEXT:    # ymm2 = mem[0,0,2,1,4,4,6,5]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm4 = ymm28[0,2,2,3,4,6,6,7]
+; AVX512-NEXT:    vpshufd $232, (%rsp), %ymm4 # 32-byte Folded Reload
+; AVX512-NEXT:    # ymm4 = mem[0,2,2,3,4,6,6,7]
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
-; AVX512-NEXT:    vpandnq %zmm0, %zmm8, %zmm0
-; AVX512-NEXT:    vpandq %zmm8, %zmm2, %zmm2
+; AVX512-NEXT:    vpandnq %zmm0, %zmm3, %zmm0
+; AVX512-NEXT:    vpandq %zmm3, %zmm2, %zmm2
 ; AVX512-NEXT:    vpord %zmm0, %zmm2, %zmm9 {%k1}
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
@@ -7352,154 +7355,155 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm1 = ymm11[0,1,1,3,4,5,5,7]
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm2 = ymm31[2,1,3,3,6,5,7,7]
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm11
-; AVX512-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm11
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm0 = ymm24[0,0,2,1,4,4,6,5]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm1 = ymm22[0,2,2,3,4,6,6,7]
+; AVX512-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm11
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm0 = ymm28[0,0,2,1,4,4,6,5]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm1 = ymm23[0,2,2,3,4,6,6,7]
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm1 = ymm20[0,0,2,1,4,4,6,5]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[0,2,2,3,4,6,6,7]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm1 = ymm21[0,0,2,1,4,4,6,5]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm2 = ymm20[0,2,2,3,4,6,6,7]
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512-NEXT:    vpandnq %zmm0, %zmm8, %zmm0
-; AVX512-NEXT:    vpandq %zmm8, %zmm1, %zmm1
+; AVX512-NEXT:    vpandnq %zmm0, %zmm3, %zmm0
+; AVX512-NEXT:    vpandq %zmm3, %zmm1, %zmm1
 ; AVX512-NEXT:    vpord %zmm0, %zmm1, %zmm11 {%k1}
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm7[0,1,2,3,4,5,5,7]
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm7[0,1,2,3,6,5,7,7]
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,4,5,5,7]
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,6,5,7,7]
 ; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm12[8],xmm13[8],xmm12[9],xmm13[9],xmm12[10],xmm13[10],xmm12[11],xmm13[11],xmm12[12],xmm13[12],xmm12[13],xmm13[13],xmm12[14],xmm13[14],xmm12[15],xmm13[15]
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm12[8],xmm14[8],xmm12[9],xmm14[9],xmm12[10],xmm14[10],xmm12[11],xmm14[11],xmm12[12],xmm14[12],xmm12[13],xmm14[13],xmm12[14],xmm14[14],xmm12[15],xmm14[15]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm2[0,0,2,1,4,5,6,7]
 ; AVX512-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[0,2,2,3,4,5,6,7]
 ; AVX512-NEXT:    vinserti32x4 $1, %xmm1, %ymm0, %ymm28
-; AVX512-NEXT:    vmovdqa 32(%rsi), %xmm0
+; AVX512-NEXT:    vmovdqa 32(%rsi), %xmm5
 ; AVX512-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
-; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[3,3,3,3]
-; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
-; AVX512-NEXT:    vinserti128 $1, %xmm7, %ymm5, %ymm5
-; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm7 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,1,1]
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
-; AVX512-NEXT:    vinserti128 $1, %xmm4, %ymm7, %ymm4
-; AVX512-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm5
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[3,3,3,3]
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero
+; AVX512-NEXT:    vinserti128 $1, %xmm6, %ymm4, %ymm4
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm6, %ymm0
+; AVX512-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm4 = ymm30[0,1,1,3,4,5,5,7]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm7 = ymm29[2,1,3,3,6,5,7,7]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm4
-; AVX512-NEXT:    vpternlogq $184, %zmm5, %zmm3, %zmm4
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm5 = ymm23[0,0,2,1,4,4,6,5]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm7 = ymm21[0,2,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm7 = ymm19[0,0,2,1,4,4,6,5]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm10 = ymm17[0,2,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm10, %zmm7, %zmm7
-; AVX512-NEXT:    vpandnq %zmm5, %zmm8, %zmm5
-; AVX512-NEXT:    vpandq %zmm8, %zmm7, %zmm7
-; AVX512-NEXT:    vpord %zmm5, %zmm7, %zmm4 {%k1}
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,6,5]
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm2, %ymm5, %ymm18
-; AVX512-NEXT:    vmovdqa64 %xmm26, %xmm2
-; AVX512-NEXT:    vmovdqa64 %xmm16, %xmm5
-; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm2[0,0,2,1,4,5,6,7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm2[0,2,2,3,4,5,6,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm7, %ymm5, %ymm21
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,6,5]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm6 = ymm29[2,1,3,3,6,5,7,7]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
+; AVX512-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm4
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm0 = ymm24[0,0,2,1,4,4,6,5]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm6 = ymm22[0,2,2,3,4,6,6,7]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm6 = ymm19[0,0,2,1,4,4,6,5]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm7 = ymm18[0,2,2,3,4,6,6,7]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512-NEXT:    vpandnq %zmm0, %zmm3, %zmm0
+; AVX512-NEXT:    vpandq %zmm3, %zmm6, %zmm6
+; AVX512-NEXT:    vpord %zmm0, %zmm6, %zmm4 {%k1}
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,4,6,5]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm2, %ymm5, %ymm22
-; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm14[8],xmm15[8],xmm14[9],xmm15[9],xmm14[10],xmm15[10],xmm14[11],xmm15[11],xmm14[12],xmm15[12],xmm14[13],xmm15[13],xmm14[14],xmm15[14],xmm14[15],xmm15[15]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm5[0,1,1,3,4,5,6,7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm5[2,1,3,3,4,5,6,7]
+; AVX512-NEXT:    vinserti32x4 $1, %xmm2, %ymm0, %ymm18
+; AVX512-NEXT:    vmovdqa64 %xmm26, %xmm0
+; AVX512-NEXT:    vmovdqa64 %xmm17, %xmm2
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[0,0,2,1,4,5,6,7]
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm0[0,2,2,3,4,5,6,7]
+; AVX512-NEXT:    vinserti32x4 $1, %xmm6, %ymm2, %ymm21
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,6,5]
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; AVX512-NEXT:    vinserti32x4 $1, %xmm0, %ymm2, %ymm22
+; AVX512-NEXT:    vmovdqa64 %xmm16, %xmm0
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm15[8],xmm0[8],xmm15[9],xmm0[9],xmm15[10],xmm0[10],xmm15[11],xmm0[11],xmm15[12],xmm0[12],xmm15[13],xmm0[13],xmm15[14],xmm0[14],xmm15[15],xmm0[15]
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[0,1,1,3,4,5,6,7]
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm6[2,1,3,3,4,5,6,7]
 ; AVX512-NEXT:    vinserti32x4 $1, %xmm7, %ymm2, %ymm26
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm5[0,1,2,3,4,5,5,7]
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,7,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm5, %ymm7, %ymm19
-; AVX512-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm5 # 16-byte Folded Reload
-; AVX512-NEXT:    # xmm5 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3],xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm5[0,0,2,1,4,5,6,7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm5[0,2,2,3,4,5,6,7]
-; AVX512-NEXT:    vinserti128 $1, %xmm13, %ymm10, %ymm14
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm5[0,1,2,3,4,4,6,5]
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti128 $1, %xmm5, %ymm10, %ymm15
-; AVX512-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm5 # 16-byte Folded Reload
-; AVX512-NEXT:    # xmm5 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3],xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm5[0,0,2,1,4,5,6,7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm5[0,2,2,3,4,5,6,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm13, %ymm10, %ymm17
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm5[0,1,2,3,4,4,6,5]
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti32x4 $1, %xmm5, %ymm10, %ymm20
-; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm6[0,1,2,3,4,5,5,7]
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,7,7]
+; AVX512-NEXT:    vinserti32x4 $1, %xmm6, %ymm7, %ymm19
+; AVX512-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm7 # 16-byte Folded Reload
+; AVX512-NEXT:    # xmm7 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm7[0,0,2,1,4,5,6,7]
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm7[0,2,2,3,4,5,6,7]
+; AVX512-NEXT:    vinserti128 $1, %xmm12, %ymm8, %ymm14
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm7[0,1,2,3,4,4,6,5]
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,6,7]
+; AVX512-NEXT:    vinserti128 $1, %xmm7, %ymm8, %ymm15
+; AVX512-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm7 # 16-byte Folded Reload
+; AVX512-NEXT:    # xmm7 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm7[0,0,2,1,4,5,6,7]
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm7[0,2,2,3,4,5,6,7]
+; AVX512-NEXT:    vinserti32x4 $1, %xmm12, %ymm8, %ymm17
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm7[0,1,2,3,4,4,6,5]
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,6,7]
+; AVX512-NEXT:    vinserti32x4 $1, %xmm7, %ymm8, %ymm20
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
 ; AVX512-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX512-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
 ; AVX512-NEXT:    # xmm1 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm1[0,1,1,3,4,5,6,7]
-; AVX512-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm1[2,1,3,3,4,5,6,7]
-; AVX512-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm0
-; AVX512-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm1[0,1,2,3,4,5,5,7]
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm1[0,1,1,3,4,5,6,7]
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm1[2,1,3,3,4,5,6,7]
+; AVX512-NEXT:    vinserti128 $1, %xmm8, %ymm7, %ymm8
+; AVX512-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,4,5,5,7]
 ; AVX512-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,7,7]
-; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm10, %ymm1
-; AVX512-NEXT:    vmovdqa 16(%rsi), %xmm10
-; AVX512-NEXT:    vmovdqa 16(%rdi), %xmm13
-; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3],xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7]
-; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm13[8],xmm10[8],xmm13[9],xmm10[9],xmm13[10],xmm10[10],xmm13[11],xmm10[11],xmm13[12],xmm10[12],xmm13[13],xmm10[13],xmm13[14],xmm10[14],xmm13[15],xmm10[15]
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm13 = xmm5[2,3,2,3]
-; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm13 = xmm13[0],zero,zero,zero,xmm13[1],zero,zero,zero
+; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm7, %ymm1
+; AVX512-NEXT:    vmovdqa 16(%rsi), %xmm7
+; AVX512-NEXT:    vmovdqa 16(%rdi), %xmm12
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm12[0],xmm7[0],xmm12[1],xmm7[1],xmm12[2],xmm7[2],xmm12[3],xmm7[3],xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7]
+; AVX512-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm12[8],xmm7[8],xmm12[9],xmm7[9],xmm12[10],xmm7[10],xmm12[11],xmm7[11],xmm12[12],xmm7[12],xmm12[13],xmm7[13],xmm12[14],xmm7[14],xmm12[15],xmm7[15]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm12 = xmm5[2,3,2,3]
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm12 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm23 = xmm5[3,3,3,3]
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm23 = xmm23[0],zero,zero,zero,xmm23[1],zero,zero,zero
-; AVX512-NEXT:    vinserti32x4 $1, %xmm23, %ymm13, %ymm13
+; AVX512-NEXT:    vinserti32x4 $1, %xmm23, %ymm12, %ymm12
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm23 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,1,1]
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero
 ; AVX512-NEXT:    vinserti32x4 $1, %xmm5, %ymm23, %ymm5
-; AVX512-NEXT:    vinserti64x4 $1, %ymm13, %zmm5, %zmm13
+; AVX512-NEXT:    vinserti64x4 $1, %ymm12, %zmm5, %zmm12
 ; AVX512-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX512-NEXT:    # ymm5 = mem[0,1,1,3,4,5,5,7]
 ; AVX512-NEXT:    vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
 ; AVX512-NEXT:    # ymm23 = mem[2,1,3,3,6,5,7,7]
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm23, %zmm5, %zmm5
-; AVX512-NEXT:    vpternlogq $184, %zmm13, %zmm3, %zmm5
-; AVX512-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX512-NEXT:    # ymm13 = mem[0,0,2,1,4,4,6,5]
+; AVX512-NEXT:    vpternlogq $184, %zmm12, %zmm13, %zmm5
+; AVX512-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512-NEXT:    # ymm12 = mem[0,0,2,1,4,4,6,5]
 ; AVX512-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
 ; AVX512-NEXT:    # ymm23 = mem[0,2,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm23, %zmm13, %zmm13
-; AVX512-NEXT:    vpandnq %zmm13, %zmm8, %zmm13
+; AVX512-NEXT:    vinserti64x4 $1, %ymm23, %zmm12, %zmm12
+; AVX512-NEXT:    vpandnq %zmm12, %zmm3, %zmm12
 ; AVX512-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
 ; AVX512-NEXT:    # ymm23 = mem[0,0,2,1,4,4,6,5]
 ; AVX512-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload
 ; AVX512-NEXT:    # ymm24 = mem[0,2,2,3,4,6,6,7]
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm24, %zmm23, %zmm23
-; AVX512-NEXT:    vpandq %zmm8, %zmm23, %zmm23
-; AVX512-NEXT:    vpord %zmm13, %zmm23, %zmm5 {%k1}
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm13 = xmm12[2,3,2,3]
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm23 = xmm12[3,3,3,3]
-; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm24 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[1,1,1,1]
+; AVX512-NEXT:    vpandq %zmm3, %zmm23, %zmm23
+; AVX512-NEXT:    vpord %zmm12, %zmm23, %zmm5 {%k1}
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm12 = xmm0[2,3,2,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm23 = xmm0[3,3,3,3]
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm24 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
 ; AVX512-NEXT:    vmovdqa64 %xmm25, %xmm2
-; AVX512-NEXT:    vmovdqa64 %xmm27, %xmm7
-; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3],xmm2[4],xmm7[4],xmm2[5],xmm7[5],xmm2[6],xmm7[6],xmm2[7],xmm7[7]
-; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm13 = xmm13[0],zero,zero,zero,xmm13[1],zero,zero,zero
-; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm23 = xmm23[0],zero,zero,zero,xmm23[1],zero,zero,zero
-; AVX512-NEXT:    vinserti32x4 $1, %xmm23, %ymm13, %ymm13
+; AVX512-NEXT:    vmovdqa64 %xmm27, %xmm6
+; AVX512-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm12 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero
-; AVX512-NEXT:    vinserti32x4 $1, %xmm12, %ymm24, %ymm12
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm23 = xmm10[2,3,2,3]
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm24 = xmm10[3,3,3,3]
-; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm29 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[1,1,1,1]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm12
-; AVX512-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX512-NEXT:    # ymm13 = mem[0,1,1,3,4,5,5,7]
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm23 = xmm23[0],zero,zero,zero,xmm23[1],zero,zero,zero
+; AVX512-NEXT:    vinserti32x4 $1, %xmm23, %ymm12, %ymm12
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-NEXT:    vinserti32x4 $1, %xmm0, %ymm24, %ymm0
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm23 = xmm7[2,3,2,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm24 = xmm7[3,3,3,3]
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm29 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,1,1]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm0
+; AVX512-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512-NEXT:    # ymm12 = mem[0,1,1,3,4,5,5,7]
 ; AVX512-NEXT:    vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm30 # 32-byte Folded Reload
 ; AVX512-NEXT:    # ymm30 = mem[2,1,3,3,6,5,7,7]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm30, %zmm13, %zmm13
-; AVX512-NEXT:    vpternlogq $184, %zmm12, %zmm3, %zmm13
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm12 = xmm2[2,3,2,3]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm30, %zmm12, %zmm12
+; AVX512-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm12
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm30 = xmm2[3,3,3,3]
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm31 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
@@ -7513,57 +7517,57 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
 ; AVX512-NEXT:    # ymm16 = mem[0,2,2,3,4,6,6,7]
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm16, %zmm27, %zmm16
-; AVX512-NEXT:    vpandnq %zmm25, %zmm8, %zmm25
-; AVX512-NEXT:    vpandq %zmm8, %zmm16, %zmm16
-; AVX512-NEXT:    vpord %zmm25, %zmm16, %zmm13 {%k1}
+; AVX512-NEXT:    vpandnq %zmm25, %zmm3, %zmm25
+; AVX512-NEXT:    vpandq %zmm3, %zmm16, %zmm16
+; AVX512-NEXT:    vpord %zmm25, %zmm16, %zmm12 {%k1}
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm16 = xmm23[0],zero,zero,zero,xmm23[1],zero,zero,zero
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm23 = xmm24[0],zero,zero,zero,xmm24[1],zero,zero,zero
 ; AVX512-NEXT:    vinserti32x4 $1, %xmm23, %ymm16, %ymm16
-; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero
-; AVX512-NEXT:    vinserti32x4 $1, %xmm10, %ymm29, %ymm10
-; AVX512-NEXT:    vinserti64x4 $1, %ymm16, %zmm10, %zmm10
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
+; AVX512-NEXT:    vinserti32x4 $1, %xmm7, %ymm29, %ymm7
+; AVX512-NEXT:    vinserti64x4 $1, %ymm16, %zmm7, %zmm7
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm16 = ymm28[0,1,1,3,4,5,5,7]
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm18 = ymm18[2,1,3,3,6,5,7,7]
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm18, %zmm16, %zmm16
-; AVX512-NEXT:    vpternlogq $184, %zmm10, %zmm3, %zmm16
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm10 = ymm21[0,0,2,1,4,4,6,5]
+; AVX512-NEXT:    vpternlogq $184, %zmm7, %zmm13, %zmm16
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm7 = ymm21[0,0,2,1,4,4,6,5]
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm18 = ymm22[0,2,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm18, %zmm10, %zmm10
+; AVX512-NEXT:    vinserti64x4 $1, %ymm18, %zmm7, %zmm7
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm18 = ymm26[0,0,2,1,4,4,6,5]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm7 = ymm19[0,2,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm7, %zmm18, %zmm7
-; AVX512-NEXT:    vpandnq %zmm10, %zmm8, %zmm10
-; AVX512-NEXT:    vpandq %zmm8, %zmm7, %zmm7
-; AVX512-NEXT:    vpord %zmm10, %zmm7, %zmm16 {%k1}
-; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm7 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero
-; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm30[0],zero,zero,zero,xmm30[1],zero,zero,zero
-; AVX512-NEXT:    vinserti128 $1, %xmm10, %ymm7, %ymm7
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm6 = ymm19[0,2,2,3,4,6,6,7]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm6, %zmm18, %zmm6
+; AVX512-NEXT:    vpandnq %zmm7, %zmm3, %zmm7
+; AVX512-NEXT:    vpandq %zmm3, %zmm6, %zmm6
+; AVX512-NEXT:    vpord %zmm7, %zmm6, %zmm16 {%k1}
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm6 = xmm30[0],zero,zero,zero,xmm30[1],zero,zero,zero
+; AVX512-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm0
 ; AVX512-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
 ; AVX512-NEXT:    vinserti32x4 $1, %xmm2, %ymm31, %ymm2
-; AVX512-NEXT:    vinserti64x4 $1, %ymm7, %zmm2, %zmm2
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm7 = ymm14[0,1,1,3,4,5,5,7]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm10 = ymm15[2,1,3,3,6,5,7,7]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm10, %zmm7, %zmm7
-; AVX512-NEXT:    vpternlogq $184, %zmm2, %zmm3, %zmm7
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm2 = ymm17[0,0,2,1,4,4,6,5]
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm3 = ymm20[0,2,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,1,4,4,6,5]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm2 = ymm14[0,1,1,3,4,5,5,7]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm6 = ymm15[2,1,3,3,6,5,7,7]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm6, %zmm2, %zmm2
+; AVX512-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm2
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm0 = ymm17[0,0,2,1,4,4,6,5]
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm6 = ymm20[0,2,2,3,4,6,6,7]
+; AVX512-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512-NEXT:    vpshufd {{.*#+}} ymm6 = ymm8[0,0,2,1,4,4,6,5]
 ; AVX512-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
-; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512-NEXT:    vpandnq %zmm2, %zmm8, %zmm1
-; AVX512-NEXT:    vpandq %zmm8, %zmm0, %zmm0
-; AVX512-NEXT:    vpord %zmm1, %zmm0, %zmm7 {%k1}
+; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm6, %zmm1
+; AVX512-NEXT:    vpandnq %zmm0, %zmm3, %zmm0
+; AVX512-NEXT:    vpandq %zmm3, %zmm1, %zmm1
+; AVX512-NEXT:    vpord %zmm0, %zmm1, %zmm2 {%k1}
 ; AVX512-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512-NEXT:    vmovdqa64 %zmm7, (%rax)
+; AVX512-NEXT:    vmovdqa64 %zmm2, (%rax)
 ; AVX512-NEXT:    vmovdqa64 %zmm16, 192(%rax)
-; AVX512-NEXT:    vmovdqa64 %zmm13, 128(%rax)
+; AVX512-NEXT:    vmovdqa64 %zmm12, 128(%rax)
 ; AVX512-NEXT:    vmovdqa64 %zmm5, 320(%rax)
 ; AVX512-NEXT:    vmovdqa64 %zmm4, 256(%rax)
 ; AVX512-NEXT:    vmovdqa64 %zmm11, 448(%rax)
 ; AVX512-NEXT:    vmovdqa64 %zmm9, 384(%rax)
-; AVX512-NEXT:    vmovdqa64 %zmm6, 64(%rax)
-; AVX512-NEXT:    addq $648, %rsp # imm = 0x288
+; AVX512-NEXT:    vmovdqa64 %zmm10, 64(%rax)
+; AVX512-NEXT:    addq $680, %rsp # imm = 0x2A8
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
 ;
@@ -7725,22 +7729,22 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-FCP-NEXT:    vmovdqa (%rsi), %xmm12
 ; AVX512-FCP-NEXT:    vmovdqa (%rdi), %xmm9
 ; AVX512-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm9[8],xmm12[8],xmm9[9],xmm12[9],xmm9[10],xmm12[10],xmm9[11],xmm12[11],xmm9[12],xmm12[12],xmm9[13],xmm12[13],xmm9[14],xmm12[14],xmm9[15],xmm12[15]
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm7 = [4,5,u,u,u,u,u,u,6,7,u,u,u,u,u,u]
+; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} xmm5 = [4,5,u,u,u,u,u,u,6,7,u,u,u,u,u,u]
 ; AVX512-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; AVX512-FCP-NEXT:    vpshufb %xmm7, %xmm0, %xmm2
+; AVX512-FCP-NEXT:    vpshufb %xmm5, %xmm0, %xmm2
 ; AVX512-FCP-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
 ; AVX512-FCP-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm5 = [8,9,u,u,u,u,u,u,10,11,u,u,u,u,u,u,12,13,u,u,u,u,u,u,14,15,u,u,u,u,u,u]
-; AVX512-FCP-NEXT:    vpshufb %ymm5, %ymm0, %ymm0
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm4
-; AVX512-FCP-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535]
-; AVX512-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm4 # 64-byte Folded Reload
-; AVX512-FCP-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0]
-; AVX512-FCP-NEXT:    vpandnq %zmm19, %zmm0, %zmm19
-; AVX512-FCP-NEXT:    vpandq %zmm0, %zmm21, %zmm21
+; AVX512-FCP-NEXT:    vmovdqa {{.*#+}} ymm4 = [8,9,u,u,u,u,u,u,10,11,u,u,u,u,u,u,12,13,u,u,u,u,u,u,14,15,u,u,u,u,u,u]
+; AVX512-FCP-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm3
+; AVX512-FCP-NEXT:    vpbroadcastq {{.*#+}} zmm7 = [65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535]
+; AVX512-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm3 # 64-byte Folded Reload
+; AVX512-FCP-NEXT:    vpbroadcastq {{.*#+}} zmm2 = [65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0]
+; AVX512-FCP-NEXT:    vpandnq %zmm19, %zmm2, %zmm19
+; AVX512-FCP-NEXT:    vpandq %zmm2, %zmm21, %zmm21
 ; AVX512-FCP-NEXT:    movw $-21846, %ax # imm = 0xAAAA
 ; AVX512-FCP-NEXT:    kmovw %eax, %k1
-; AVX512-FCP-NEXT:    vpord %zmm19, %zmm21, %zmm4 {%k1}
+; AVX512-FCP-NEXT:    vpord %zmm19, %zmm21, %zmm3 {%k1}
 ; AVX512-FCP-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
 ; AVX512-FCP-NEXT:    # xmm15 = xmm15[0],mem[0],xmm15[1],mem[1],xmm15[2],mem[2],xmm15[3],mem[3],xmm15[4],mem[4],xmm15[5],mem[5],xmm15[6],mem[6],xmm15[7],mem[7]
 ; AVX512-FCP-NEXT:    vinserti128 $1, %xmm15, %ymm15, %ymm15
@@ -7750,117 +7754,117 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512-FCP-NEXT:    vmovdqa 48(%rsi), %xmm15
 ; AVX512-FCP-NEXT:    vmovdqa 48(%rdi), %xmm8
 ; AVX512-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm8[0],xmm15[0],xmm8[1],xmm15[1],xmm8[2],xmm15[2],xmm8[3],xmm15[3],xmm8[4],xmm15[4],xmm8[5],xmm15[5],xmm8[6],xmm15[6],xmm8[7],xmm15[7]
-; AVX512-FCP-NEXT:    vpshufb %xmm7, %xmm6, %xmm1
+; AVX512-FCP-NEXT:    vpshufb %xmm5, %xmm6, %xmm0
 ; AVX512-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm21 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero
-; AVX512-FCP-NEXT:    vinserti32x4 $1, %xmm1, %ymm21, %ymm1
+; AVX512-FCP-NEXT:    vinserti32x4 $1, %xmm0, %ymm21, %ymm0
 ; AVX512-FCP-NEXT:    vinserti128 $1, %xmm6, %ymm6, %ymm6
-; AVX512-FCP-NEXT:    vpshufb %ymm5, %ymm6, %ymm6
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm6
-; AVX512-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm8[8],xmm15[8],xmm8[9],xmm15[9],xmm8[10],xmm15[10],xmm8[11],xmm15[11],xmm8[12],xmm15[12],xmm8[13],xmm15[13],xmm8[14],xmm15[14],xmm8[15],xmm15[15]
-; AVX512-FCP-NEXT:    vpshufb %xmm7, %xmm1, %xmm8
-; AVX512-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm15 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512-FCP-NEXT:    vpshufb %ymm4, %ymm6, %ymm6
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm6
+; AVX512-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm8[8],xmm15[8],xmm8[9],xmm15[9],xmm8[10],xmm15[10],xmm8[11],xmm15[11],xmm8[12],xmm15[12],xmm8[13],xmm15[13],xmm8[14],xmm15[14],xmm8[15],xmm15[15]
+; AVX512-FCP-NEXT:    vpshufb %xmm5, %xmm0, %xmm8
+; AVX512-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm15 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; AVX512-FCP-NEXT:    vinserti128 $1, %xmm8, %ymm15, %ymm8
-; AVX512-FCP-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512-FCP-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm15
-; AVX512-FCP-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm1 # 16-byte Folded Reload
-; AVX512-FCP-NEXT:    # xmm1 = xmm14[0],mem[0],xmm14[1],mem[1],xmm14[2],mem[2],xmm14[3],mem[3],xmm14[4],mem[4],xmm14[5],mem[5],xmm14[6],mem[6],xmm14[7],mem[7]
-; AVX512-FCP-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512-FCP-NEXT:    vpshufb %ymm11, %ymm1, %ymm8
-; AVX512-FCP-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm8, %zmm1, %zmm13
-; AVX512-FCP-NEXT:    vmovdqa 32(%rsi), %xmm1
+; AVX512-FCP-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512-FCP-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm15
+; AVX512-FCP-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm0 # 16-byte Folded Reload
+; AVX512-FCP-NEXT:    # xmm0 = xmm14[0],mem[0],xmm14[1],mem[1],xmm14[2],mem[2],xmm14[3],mem[3],xmm14[4],mem[4],xmm14[5],mem[5],xmm14[6],mem[6],xmm14[7],mem[7]
+; AVX512-FCP-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512-FCP-NEXT:    vpshufb %ymm11, %ymm0, %ymm8
+; AVX512-FCP-NEXT:    vpshufb %ymm10, %ymm0, %ymm0
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm13
+; AVX512-FCP-NEXT:    vmovdqa 32(%rsi), %xmm0
 ; AVX512-FCP-NEXT:    vmovdqa 32(%rdi), %xmm8
-; AVX512-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3],xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7]
-; AVX512-FCP-NEXT:    vpshufb %xmm7, %xmm11, %xmm14
+; AVX512-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3],xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
+; AVX512-FCP-NEXT:    vpshufb %xmm5, %xmm11, %xmm14
 ; AVX512-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm21 = xmm11[0],zero,zero,zero,xmm11[1],zero,zero,zero
 ; AVX512-FCP-NEXT:    vinserti32x4 $1, %xmm14, %ymm21, %ymm14
 ; AVX512-FCP-NEXT:    vinserti128 $1, %xmm11, %ymm11, %ymm11
-; AVX512-FCP-NEXT:    vpshufb %ymm5, %ymm11, %ymm11
+; AVX512-FCP-NEXT:    vpshufb %ymm4, %ymm11, %ymm11
 ; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm11, %zmm14, %zmm11
-; AVX512-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm8[8],xmm1[8],xmm8[9],xmm1[9],xmm8[10],xmm1[10],xmm8[11],xmm1[11],xmm8[12],xmm1[12],xmm8[13],xmm1[13],xmm8[14],xmm1[14],xmm8[15],xmm1[15]
-; AVX512-FCP-NEXT:    vpshufb %xmm7, %xmm1, %xmm8
-; AVX512-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm14 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15]
+; AVX512-FCP-NEXT:    vpshufb %xmm5, %xmm0, %xmm8
+; AVX512-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm14 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; AVX512-FCP-NEXT:    vinserti128 $1, %xmm8, %ymm14, %ymm8
-; AVX512-FCP-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512-FCP-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm14
-; AVX512-FCP-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512-FCP-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX512-FCP-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX512-FCP-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm17, %ymm2
-; AVX512-FCP-NEXT:    vpshufb %ymm2, %ymm1, %ymm8
-; AVX512-FCP-NEXT:    vmovdqa64 %ymm18, %ymm2
-; AVX512-FCP-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm8, %zmm1, %zmm21
+; AVX512-FCP-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512-FCP-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm14
+; AVX512-FCP-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512-FCP-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-FCP-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; AVX512-FCP-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm17, %ymm1
+; AVX512-FCP-NEXT:    vpshufb %ymm1, %ymm0, %ymm8
+; AVX512-FCP-NEXT:    vmovdqa64 %ymm18, %ymm1
+; AVX512-FCP-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm21
 ; AVX512-FCP-NEXT:    vmovdqa 16(%rsi), %xmm8
 ; AVX512-FCP-NEXT:    vmovdqa 16(%rdi), %xmm10
-; AVX512-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3],xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
-; AVX512-FCP-NEXT:    vpshufb %xmm7, %xmm1, %xmm2
-; AVX512-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm17 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX512-FCP-NEXT:    vinserti32x4 $1, %xmm2, %ymm17, %ymm2
-; AVX512-FCP-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512-FCP-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
-; AVX512-FCP-NEXT:    vpshufb %xmm7, %xmm2, %xmm8
-; AVX512-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3],xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
+; AVX512-FCP-NEXT:    vpshufb %xmm5, %xmm0, %xmm1
+; AVX512-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm17 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512-FCP-NEXT:    vinserti32x4 $1, %xmm1, %ymm17, %ymm1
+; AVX512-FCP-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512-FCP-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
+; AVX512-FCP-NEXT:    vpshufb %xmm5, %xmm1, %xmm8
+; AVX512-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
 ; AVX512-FCP-NEXT:    vinserti128 $1, %xmm8, %ymm10, %ymm8
-; AVX512-FCP-NEXT:    vinserti128 $1, %xmm2, %ymm2, %ymm2
-; AVX512-FCP-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm8, %zmm2
+; AVX512-FCP-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
+; AVX512-FCP-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
 ; AVX512-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm9[0],xmm12[0],xmm9[1],xmm12[1],xmm9[2],xmm12[2],xmm9[3],xmm12[3],xmm9[4],xmm12[4],xmm9[5],xmm12[5],xmm9[6],xmm12[6],xmm9[7],xmm12[7]
 ; AVX512-FCP-NEXT:    vinserti128 $1, %xmm8, %ymm8, %ymm9
-; AVX512-FCP-NEXT:    vpshufb %ymm5, %ymm9, %ymm5
-; AVX512-FCP-NEXT:    vpshufb %xmm7, %xmm8, %xmm7
+; AVX512-FCP-NEXT:    vpshufb %ymm4, %ymm9, %ymm4
+; AVX512-FCP-NEXT:    vpshufb %xmm5, %xmm8, %xmm5
 ; AVX512-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero
-; AVX512-FCP-NEXT:    vinserti128 $1, %xmm7, %ymm8, %ymm7
-; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm5
-; AVX512-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm6 # 64-byte Folded Reload
-; AVX512-FCP-NEXT:    vpandnq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm7 # 64-byte Folded Reload
-; AVX512-FCP-NEXT:    vpandq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
-; AVX512-FCP-NEXT:    vpord %zmm7, %zmm8, %zmm6 {%k1}
-; AVX512-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm15 # 64-byte Folded Reload
-; AVX512-FCP-NEXT:    vpandnq (%rsp), %zmm0, %zmm7 # 64-byte Folded Reload
-; AVX512-FCP-NEXT:    vpandq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
-; AVX512-FCP-NEXT:    vpord %zmm7, %zmm8, %zmm15 {%k1}
-; AVX512-FCP-NEXT:    vpternlogq $226, %zmm20, %zmm3, %zmm11
-; AVX512-FCP-NEXT:    vpandnq %zmm22, %zmm0, %zmm7
-; AVX512-FCP-NEXT:    vpandq %zmm0, %zmm23, %zmm8
-; AVX512-FCP-NEXT:    vpord %zmm7, %zmm8, %zmm11 {%k1}
-; AVX512-FCP-NEXT:    vpternlogq $226, %zmm24, %zmm3, %zmm14
-; AVX512-FCP-NEXT:    vpandnq %zmm25, %zmm0, %zmm7
-; AVX512-FCP-NEXT:    vpandq %zmm0, %zmm26, %zmm8
-; AVX512-FCP-NEXT:    vpord %zmm7, %zmm8, %zmm14 {%k1}
-; AVX512-FCP-NEXT:    vpternlogq $226, %zmm27, %zmm3, %zmm1
-; AVX512-FCP-NEXT:    vpandnq %zmm28, %zmm0, %zmm7
-; AVX512-FCP-NEXT:    vpandq %zmm0, %zmm29, %zmm8
-; AVX512-FCP-NEXT:    vpord %zmm7, %zmm8, %zmm1 {%k1}
-; AVX512-FCP-NEXT:    vpternlogq $226, %zmm30, %zmm3, %zmm2
-; AVX512-FCP-NEXT:    vpandnq %zmm31, %zmm0, %zmm7
-; AVX512-FCP-NEXT:    vpandq %zmm0, %zmm16, %zmm8
-; AVX512-FCP-NEXT:    vpord %zmm7, %zmm8, %zmm2 {%k1}
-; AVX512-FCP-NEXT:    vpternlogq $226, %zmm19, %zmm3, %zmm5
-; AVX512-FCP-NEXT:    vpandnq %zmm13, %zmm0, %zmm3
-; AVX512-FCP-NEXT:    vpandq %zmm0, %zmm21, %zmm0
-; AVX512-FCP-NEXT:    vpord %zmm3, %zmm0, %zmm5 {%k1}
+; AVX512-FCP-NEXT:    vinserti128 $1, %xmm5, %ymm8, %ymm5
+; AVX512-FCP-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm6 # 64-byte Folded Reload
+; AVX512-FCP-NEXT:    vpandnq {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm5 # 64-byte Folded Reload
+; AVX512-FCP-NEXT:    vpandq {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm8 # 64-byte Folded Reload
+; AVX512-FCP-NEXT:    vpord %zmm5, %zmm8, %zmm6 {%k1}
+; AVX512-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm15 # 64-byte Folded Reload
+; AVX512-FCP-NEXT:    vpandnq (%rsp), %zmm2, %zmm5 # 64-byte Folded Reload
+; AVX512-FCP-NEXT:    vpandq {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm8 # 64-byte Folded Reload
+; AVX512-FCP-NEXT:    vpord %zmm5, %zmm8, %zmm15 {%k1}
+; AVX512-FCP-NEXT:    vpternlogq $226, %zmm20, %zmm7, %zmm11
+; AVX512-FCP-NEXT:    vpandnq %zmm22, %zmm2, %zmm5
+; AVX512-FCP-NEXT:    vpandq %zmm2, %zmm23, %zmm8
+; AVX512-FCP-NEXT:    vpord %zmm5, %zmm8, %zmm11 {%k1}
+; AVX512-FCP-NEXT:    vpternlogq $226, %zmm24, %zmm7, %zmm14
+; AVX512-FCP-NEXT:    vpandnq %zmm25, %zmm2, %zmm5
+; AVX512-FCP-NEXT:    vpandq %zmm2, %zmm26, %zmm8
+; AVX512-FCP-NEXT:    vpord %zmm5, %zmm8, %zmm14 {%k1}
+; AVX512-FCP-NEXT:    vpternlogq $226, %zmm27, %zmm7, %zmm0
+; AVX512-FCP-NEXT:    vpandnq %zmm28, %zmm2, %zmm5
+; AVX512-FCP-NEXT:    vpandq %zmm2, %zmm29, %zmm8
+; AVX512-FCP-NEXT:    vpord %zmm5, %zmm8, %zmm0 {%k1}
+; AVX512-FCP-NEXT:    vpternlogq $226, %zmm30, %zmm7, %zmm1
+; AVX512-FCP-NEXT:    vpandnq %zmm31, %zmm2, %zmm5
+; AVX512-FCP-NEXT:    vpandq %zmm2, %zmm16, %zmm8
+; AVX512-FCP-NEXT:    vpord %zmm5, %zmm8, %zmm1 {%k1}
+; AVX512-FCP-NEXT:    vpternlogq $226, %zmm19, %zmm7, %zmm4
+; AVX512-FCP-NEXT:    vpandnq %zmm13, %zmm2, %zmm5
+; AVX512-FCP-NEXT:    vpandq %zmm2, %zmm21, %zmm2
+; AVX512-FCP-NEXT:    vpord %zmm5, %zmm2, %zmm4 {%k1}
 ; AVX512-FCP-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm2, 192(%rax)
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, 128(%rax)
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm4, (%rax)
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm0, 128(%rax)
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm14, 320(%rax)
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm11, 256(%rax)
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm15, 448(%rax)
 ; AVX512-FCP-NEXT:    vmovdqa64 %zmm6, 384(%rax)
-; AVX512-FCP-NEXT:    vmovdqa64 %zmm4, 64(%rax)
+; AVX512-FCP-NEXT:    vmovdqa64 %zmm3, 64(%rax)
 ; AVX512-FCP-NEXT:    addq $392, %rsp # imm = 0x188
 ; AVX512-FCP-NEXT:    vzeroupper
 ; AVX512-FCP-NEXT:    retq
 ;
 ; AVX512DQ-LABEL: store_i8_stride8_vf64:
 ; AVX512DQ:       # %bb.0:
-; AVX512DQ-NEXT:    subq $648, %rsp # imm = 0x288
+; AVX512DQ-NEXT:    subq $680, %rsp # imm = 0x2A8
 ; AVX512DQ-NEXT:    movq {{[0-9]+}}(%rsp), %rax
 ; AVX512DQ-NEXT:    movq {{[0-9]+}}(%rsp), %r10
 ; AVX512DQ-NEXT:    vmovdqa (%rcx), %xmm1
@@ -7887,8 +7891,8 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-NEXT:    vmovdqa 48(%rax), %xmm5
 ; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm8, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm0[0,2,2,3,4,5,6,7]
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm7, %ymm1, %ymm1
 ; AVX512DQ-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,4,6,5]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
@@ -7896,10 +7900,10 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT:    vmovdqa (%r9), %xmm1
 ; AVX512DQ-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-NEXT:    vmovdqa 48(%r9), %xmm12
+; AVX512DQ-NEXT:    vmovdqa 48(%r9), %xmm7
 ; AVX512DQ-NEXT:    vmovdqa (%r8), %xmm0
 ; AVX512DQ-NEXT:    vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; AVX512DQ-NEXT:    vmovdqa 48(%r8), %xmm13
+; AVX512DQ-NEXT:    vmovdqa 48(%r8), %xmm12
 ; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,1,1,3,4,5,6,7]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm0[2,1,3,3,4,5,6,7]
@@ -7907,7 +7911,8 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,5,7]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,7,7]
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm8
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512DQ-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm9 = xmm0[0,2,2,3,4,5,6,7]
@@ -7915,8 +7920,7 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,4,6,5]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512DQ-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm9
 ; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm11 = xmm0[0,2,2,3,4,5,6,7]
@@ -7926,69 +7930,70 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
 ; AVX512DQ-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX512DQ-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm12[0],xmm7[0],xmm12[1],xmm7[1],xmm12[2],xmm7[2],xmm12[3],xmm7[3],xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,1,1,3,4,5,6,7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm0[2,1,3,3,4,5,6,7]
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm14, %ymm1, %ymm1
-; AVX512DQ-NEXT:    vmovdqu %ymm1, (%rsp) # 32-byte Spill
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm0[2,1,3,3,4,5,6,7]
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm13, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT:    vmovdqa 32(%rdx), %xmm1
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm0[0,1,2,3,4,5,5,7]
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm0[0,1,2,3,4,5,5,7]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,7,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm0, %ymm14, %ymm28
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm0, %ymm13, %ymm0
+; AVX512DQ-NEXT:    vmovdqu %ymm0, (%rsp) # 32-byte Spill
 ; AVX512DQ-NEXT:    vmovdqa 32(%r10), %xmm0
 ; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[0,0,2,1,4,5,6,7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm3[0,2,2,3,4,5,6,7]
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm14, %ymm2, %ymm11
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm3[0,2,2,3,4,5,6,7]
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm13, %ymm2, %ymm11
 ; AVX512DQ-NEXT:    vmovdqa 32(%rax), %xmm2
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm14 = xmm3[0,1,2,3,4,4,6,5]
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm3[0,1,2,3,4,4,6,5]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm3, %ymm14, %ymm31
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm3, %ymm13, %ymm31
 ; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm3[0,0,2,1,4,5,6,7]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[0,2,2,3,4,5,6,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm5, %ymm4, %ymm24
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm5, %ymm4, %ymm28
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,4,4,6,5]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm3, %ymm4, %ymm22
-; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm13[8],xmm12[8],xmm13[9],xmm12[9],xmm13[10],xmm12[10],xmm13[11],xmm12[11],xmm13[12],xmm12[12],xmm13[13],xmm12[13],xmm13[14],xmm12[14],xmm13[15],xmm12[15]
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm3, %ymm4, %ymm23
+; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm12[8],xmm7[8],xmm12[9],xmm7[9],xmm12[10],xmm7[10],xmm12[11],xmm7[11],xmm12[12],xmm7[12],xmm12[13],xmm7[13],xmm12[14],xmm7[14],xmm12[15],xmm7[15]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm4 = xmm3[0,1,1,3,4,5,6,7]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[2,1,3,3,4,5,6,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm5, %ymm4, %ymm20
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm5, %ymm4, %ymm21
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm4 = xmm3[0,1,2,3,4,5,5,7]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,7,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm3, %ymm4, %ymm18
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm3, %ymm4, %ymm20
 ; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm10[0],xmm1[1],xmm10[1],xmm1[2],xmm10[2],xmm1[3],xmm10[3],xmm1[4],xmm10[4],xmm1[5],xmm10[5],xmm1[6],xmm10[6],xmm1[7],xmm10[7]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[0,0,2,1,4,5,6,7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm3[0,2,2,3,4,5,6,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm12, %ymm5, %ymm30
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm3[0,2,2,3,4,5,6,7]
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm7, %ymm5, %ymm30
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,4,4,6,5]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
 ; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm3, %ymm5, %ymm29
 ; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[0,0,2,1,4,5,6,7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm3[0,2,2,3,4,5,6,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm12, %ymm5, %ymm23
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm3[0,2,2,3,4,5,6,7]
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm7, %ymm5, %ymm24
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,4,4,6,5]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm3, %ymm5, %ymm21
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm3, %ymm5, %ymm22
 ; AVX512DQ-NEXT:    vmovdqa 32(%r9), %xmm3
 ; AVX512DQ-NEXT:    vmovdqa 32(%r8), %xmm5
-; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3],xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm12[0,1,1,3,4,5,6,7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm14 = xmm12[2,1,3,3,4,5,6,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm14, %ymm13, %ymm19
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm13 = xmm12[0,1,2,3,4,5,5,7]
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm12[0,1,2,3,6,5,7,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm12, %ymm13, %ymm17
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3],xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7]
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm7[0,1,1,3,4,5,6,7]
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm7[2,1,3,3,4,5,6,7]
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm13, %ymm12, %ymm19
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm12 = xmm7[0,1,2,3,4,5,5,7]
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,7,7]
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm7, %ymm12, %ymm18
 ; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm10[8],xmm1[9],xmm10[9],xmm1[10],xmm10[10],xmm1[11],xmm10[11],xmm1[12],xmm10[12],xmm1[13],xmm10[13],xmm1[14],xmm10[14],xmm1[15],xmm10[15]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm1[0,0,2,1,4,5,6,7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm1[0,2,2,3,4,5,6,7]
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm12, %ymm10, %ymm4
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm1[0,0,2,1,4,5,6,7]
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm1[0,2,2,3,4,5,6,7]
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm10, %ymm7, %ymm4
 ; AVX512DQ-NEXT:    vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm1[0,1,2,3,4,4,6,5]
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,4,4,6,5]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm1, %ymm10, %ymm1
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm1, %ymm7, %ymm1
 ; AVX512DQ-NEXT:    vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
@@ -8008,9 +8013,9 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,7,7]
 ; AVX512DQ-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX512DQ-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT:    vmovdqa 16(%rcx), %xmm13
+; AVX512DQ-NEXT:    vmovdqa 16(%rcx), %xmm14
 ; AVX512DQ-NEXT:    vmovdqa 16(%rdx), %xmm12
-; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm12[0],xmm13[0],xmm12[1],xmm13[1],xmm12[2],xmm13[2],xmm12[3],xmm13[3],xmm12[4],xmm13[4],xmm12[5],xmm13[5],xmm12[6],xmm13[6],xmm12[7],xmm13[7]
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm12[0],xmm14[0],xmm12[1],xmm14[1],xmm12[2],xmm14[2],xmm12[3],xmm14[3],xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX512DQ-NEXT:    vinserti128 $1, %xmm5, %ymm1, %ymm1
@@ -8023,7 +8028,7 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-NEXT:    vmovdqa 16(%rax), %xmm2
 ; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
 ; AVX512DQ-NEXT:    vmovdqa64 %xmm2, %xmm26
-; AVX512DQ-NEXT:    vmovdqa64 %xmm1, %xmm16
+; AVX512DQ-NEXT:    vmovdqa64 %xmm1, %xmm17
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[0,0,2,1,4,5,6,7]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm0[0,2,2,3,4,5,6,7]
 ; AVX512DQ-NEXT:    vinserti128 $1, %xmm5, %ymm1, %ymm1
@@ -8032,19 +8037,20 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
 ; AVX512DQ-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX512DQ-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT:    vmovdqa 16(%r9), %xmm15
-; AVX512DQ-NEXT:    vmovdqa 16(%r8), %xmm14
-; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3],xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm7[0,1,1,3,4,5,6,7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm7[2,1,3,3,4,5,6,7]
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vmovdqa 16(%r9), %xmm0
+; AVX512DQ-NEXT:    vmovdqa 16(%r8), %xmm15
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; AVX512DQ-NEXT:    vmovdqa64 %xmm0, %xmm16
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm6[0,1,1,3,4,5,6,7]
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm6[2,1,3,3,4,5,6,7]
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm13, %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
 ; AVX512DQ-NEXT:    vmovdqa (%rsi), %xmm1
 ; AVX512DQ-NEXT:    vmovdqa (%rdi), %xmm2
 ; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
 ; AVX512DQ-NEXT:    vmovdqa64 %xmm2, %xmm25
 ; AVX512DQ-NEXT:    vmovdqa64 %xmm1, %xmm27
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[2,3,2,3]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm13 = xmm0[2,3,2,3]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
 ; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
@@ -8053,60 +8059,61 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
 ; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm9 = xmm0[3,3,3,3]
-; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[3,3,3,3]
+; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm8 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero
+; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm13[0],zero,zero,zero,xmm13[1],zero,zero,zero
 ; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm3, %ymm6, %ymm3
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm3, %ymm10, %ymm3
 ; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
 ; AVX512DQ-NEXT:    vinserti128 $1, %xmm4, %ymm5, %ymm4
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm4
-; AVX512DQ-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
-; AVX512DQ-NEXT:    # ymm3 = mem[0,1,1,3,4,5,5,7]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm3, %zmm4, %zmm3
+; AVX512DQ-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
+; AVX512DQ-NEXT:    # ymm4 = mem[0,1,1,3,4,5,5,7]
 ; AVX512DQ-NEXT:    vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX512DQ-NEXT:    # ymm5 = mem[2,1,3,3,6,5,7,7]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm6
-; AVX512DQ-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535]
-; AVX512DQ-NEXT:    vpternlogq $184, %zmm4, %zmm3, %zmm6
-; AVX512DQ-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512DQ-NEXT:    # ymm4 = mem[0,0,2,1,4,4,6,5]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm10
+; AVX512DQ-NEXT:    vpbroadcastq {{.*#+}} zmm13 = [65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535]
+; AVX512DQ-NEXT:    vpternlogq $184, %zmm3, %zmm13, %zmm10
+; AVX512DQ-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX512DQ-NEXT:    # ymm3 = mem[0,0,2,1,4,4,6,5]
+; AVX512DQ-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
+; AVX512DQ-NEXT:    # ymm4 = mem[0,2,2,3,4,6,6,7]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm4
+; AVX512DQ-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Folded Reload
+; AVX512DQ-NEXT:    # ymm3 = mem[0,0,2,1,4,4,6,5]
 ; AVX512DQ-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX512DQ-NEXT:    # ymm5 = mem[0,2,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
-; AVX512DQ-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
-; AVX512DQ-NEXT:    # ymm5 = mem[0,0,2,1,4,4,6,5]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm8 = ymm8[0,2,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm8, %zmm5, %zmm5
-; AVX512DQ-NEXT:    vpbroadcastq {{.*#+}} zmm8 = [65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0]
-; AVX512DQ-NEXT:    vpandnq %zmm4, %zmm8, %zmm4
-; AVX512DQ-NEXT:    vpandq %zmm8, %zmm5, %zmm5
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm5
+; AVX512DQ-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0]
+; AVX512DQ-NEXT:    vpandnq %zmm4, %zmm3, %zmm4
+; AVX512DQ-NEXT:    vpandq %zmm3, %zmm5, %zmm5
 ; AVX512DQ-NEXT:    movw $-21846, %ax # imm = 0xAAAA
 ; AVX512DQ-NEXT:    kmovw %eax, %k1
-; AVX512DQ-NEXT:    vpord %zmm4, %zmm5, %zmm6 {%k1}
+; AVX512DQ-NEXT:    vpord %zmm4, %zmm5, %zmm10 {%k1}
 ; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
-; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero
+; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
 ; AVX512DQ-NEXT:    vinserti128 $1, %xmm4, %ymm2, %ymm2
 ; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm0, %ymm10, %ymm0
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm0, %ymm8, %ymm0
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX512DQ-NEXT:    # ymm2 = mem[0,1,1,3,4,5,5,7]
-; AVX512DQ-NEXT:    vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload
-; AVX512DQ-NEXT:    # ymm4 = mem[2,1,3,3,6,5,7,7]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm4 = ymm9[2,1,3,3,6,5,7,7]
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm9
-; AVX512DQ-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm9
+; AVX512DQ-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm9
 ; AVX512DQ-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Folded Reload
 ; AVX512DQ-NEXT:    # ymm0 = mem[0,0,2,1,4,4,6,5]
 ; AVX512DQ-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX512DQ-NEXT:    # ymm2 = mem[0,2,2,3,4,6,6,7]
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512DQ-NEXT:    vpshufd $96, (%rsp), %ymm2 # 32-byte Folded Reload
+; AVX512DQ-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Folded Reload
 ; AVX512DQ-NEXT:    # ymm2 = mem[0,0,2,1,4,4,6,5]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm4 = ymm28[0,2,2,3,4,6,6,7]
+; AVX512DQ-NEXT:    vpshufd $232, (%rsp), %ymm4 # 32-byte Folded Reload
+; AVX512DQ-NEXT:    # ymm4 = mem[0,2,2,3,4,6,6,7]
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm4, %zmm2, %zmm2
-; AVX512DQ-NEXT:    vpandnq %zmm0, %zmm8, %zmm0
-; AVX512DQ-NEXT:    vpandq %zmm8, %zmm2, %zmm2
+; AVX512DQ-NEXT:    vpandnq %zmm0, %zmm3, %zmm0
+; AVX512DQ-NEXT:    vpandq %zmm3, %zmm2, %zmm2
 ; AVX512DQ-NEXT:    vpord %zmm0, %zmm2, %zmm9 {%k1}
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
 ; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
@@ -8121,154 +8128,155 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm1 = ymm11[0,1,1,3,4,5,5,7]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm2 = ymm31[2,1,3,3,6,5,7,7]
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm11
-; AVX512DQ-NEXT:    vpternlogq $184, %zmm0, %zmm3, %zmm11
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm0 = ymm24[0,0,2,1,4,4,6,5]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm1 = ymm22[0,2,2,3,4,6,6,7]
+; AVX512DQ-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm11
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm0 = ymm28[0,0,2,1,4,4,6,5]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm1 = ymm23[0,2,2,3,4,6,6,7]
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm1 = ymm20[0,0,2,1,4,4,6,5]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm2 = ymm18[0,2,2,3,4,6,6,7]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm1 = ymm21[0,0,2,1,4,4,6,5]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm2 = ymm20[0,2,2,3,4,6,6,7]
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512DQ-NEXT:    vpandnq %zmm0, %zmm8, %zmm0
-; AVX512DQ-NEXT:    vpandq %zmm8, %zmm1, %zmm1
+; AVX512DQ-NEXT:    vpandnq %zmm0, %zmm3, %zmm0
+; AVX512DQ-NEXT:    vpandq %zmm3, %zmm1, %zmm1
 ; AVX512DQ-NEXT:    vpord %zmm0, %zmm1, %zmm11 {%k1}
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm7[0,1,2,3,4,5,5,7]
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm7[0,1,2,3,6,5,7,7]
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,4,5,5,7]
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,6,5,7,7]
 ; AVX512DQ-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
-; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm12[8],xmm13[8],xmm12[9],xmm13[9],xmm12[10],xmm13[10],xmm12[11],xmm13[11],xmm12[12],xmm13[12],xmm12[13],xmm13[13],xmm12[14],xmm13[14],xmm12[15],xmm13[15]
+; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm12[8],xmm14[8],xmm12[9],xmm14[9],xmm12[10],xmm14[10],xmm12[11],xmm14[11],xmm12[12],xmm14[12],xmm12[13],xmm14[13],xmm12[14],xmm14[14],xmm12[15],xmm14[15]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm2[0,0,2,1,4,5,6,7]
 ; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm2[0,2,2,3,4,5,6,7]
 ; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm1, %ymm0, %ymm28
-; AVX512DQ-NEXT:    vmovdqa 32(%rsi), %xmm0
+; AVX512DQ-NEXT:    vmovdqa 32(%rsi), %xmm5
 ; AVX512DQ-NEXT:    vmovdqa 32(%rdi), %xmm1
-; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm5 = xmm4[2,3,2,3]
-; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[3,3,3,3]
-; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm7, %ymm5, %ymm5
-; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm7 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[1,1,1,1]
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
 ; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm4, %ymm7, %ymm4
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm5
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[3,3,3,3]
+; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm6, %ymm4, %ymm4
+; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm6 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm0, %ymm6, %ymm0
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm4 = ymm30[0,1,1,3,4,5,5,7]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm7 = ymm29[2,1,3,3,6,5,7,7]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm7, %zmm4, %zmm4
-; AVX512DQ-NEXT:    vpternlogq $184, %zmm5, %zmm3, %zmm4
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm5 = ymm23[0,0,2,1,4,4,6,5]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm7 = ymm21[0,2,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm7, %zmm5, %zmm5
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm7 = ymm19[0,0,2,1,4,4,6,5]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm10 = ymm17[0,2,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm10, %zmm7, %zmm7
-; AVX512DQ-NEXT:    vpandnq %zmm5, %zmm8, %zmm5
-; AVX512DQ-NEXT:    vpandq %zmm8, %zmm7, %zmm7
-; AVX512DQ-NEXT:    vpord %zmm5, %zmm7, %zmm4 {%k1}
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,6,5]
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm2, %ymm5, %ymm18
-; AVX512DQ-NEXT:    vmovdqa64 %xmm26, %xmm2
-; AVX512DQ-NEXT:    vmovdqa64 %xmm16, %xmm5
-; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm5[8],xmm2[9],xmm5[9],xmm2[10],xmm5[10],xmm2[11],xmm5[11],xmm2[12],xmm5[12],xmm2[13],xmm5[13],xmm2[14],xmm5[14],xmm2[15],xmm5[15]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm2[0,0,2,1,4,5,6,7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm2[0,2,2,3,4,5,6,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm7, %ymm5, %ymm21
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm2[0,1,2,3,4,4,6,5]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm6 = ymm29[2,1,3,3,6,5,7,7]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
+; AVX512DQ-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm4
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm0 = ymm24[0,0,2,1,4,4,6,5]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm6 = ymm22[0,2,2,3,4,6,6,7]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm6 = ymm19[0,0,2,1,4,4,6,5]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm7 = ymm18[0,2,2,3,4,6,6,7]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm7, %zmm6, %zmm6
+; AVX512DQ-NEXT:    vpandnq %zmm0, %zmm3, %zmm0
+; AVX512DQ-NEXT:    vpandq %zmm3, %zmm6, %zmm6
+; AVX512DQ-NEXT:    vpord %zmm0, %zmm6, %zmm4 {%k1}
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,4,6,5]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm2, %ymm5, %ymm22
-; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm14[8],xmm15[8],xmm14[9],xmm15[9],xmm14[10],xmm15[10],xmm14[11],xmm15[11],xmm14[12],xmm15[12],xmm14[13],xmm15[13],xmm14[14],xmm15[14],xmm14[15],xmm15[15]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm5[0,1,1,3,4,5,6,7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm5[2,1,3,3,4,5,6,7]
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm2, %ymm0, %ymm18
+; AVX512DQ-NEXT:    vmovdqa64 %xmm26, %xmm0
+; AVX512DQ-NEXT:    vmovdqa64 %xmm17, %xmm2
+; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm0[0,0,2,1,4,5,6,7]
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm6 = xmm0[0,2,2,3,4,5,6,7]
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm6, %ymm2, %ymm21
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,6,5]
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm0, %ymm2, %ymm22
+; AVX512DQ-NEXT:    vmovdqa64 %xmm16, %xmm0
+; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm15[8],xmm0[8],xmm15[9],xmm0[9],xmm15[10],xmm0[10],xmm15[11],xmm0[11],xmm15[12],xmm0[12],xmm15[13],xmm0[13],xmm15[14],xmm0[14],xmm15[15],xmm0[15]
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm6[0,1,1,3,4,5,6,7]
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm6[2,1,3,3,4,5,6,7]
 ; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm7, %ymm2, %ymm26
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm5[0,1,2,3,4,5,5,7]
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,7,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm5, %ymm7, %ymm19
-; AVX512DQ-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512DQ-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm5 # 16-byte Folded Reload
-; AVX512DQ-NEXT:    # xmm5 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3],xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm5[0,0,2,1,4,5,6,7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm5[0,2,2,3,4,5,6,7]
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm13, %ymm10, %ymm14
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm5[0,1,2,3,4,4,6,5]
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm5, %ymm10, %ymm15
-; AVX512DQ-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; AVX512DQ-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm5 # 16-byte Folded Reload
-; AVX512DQ-NEXT:    # xmm5 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3],xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm5[0,0,2,1,4,5,6,7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm13 = xmm5[0,2,2,3,4,5,6,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm13, %ymm10, %ymm17
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm5[0,1,2,3,4,4,6,5]
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm5, %ymm10, %ymm20
-; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm6[0,1,2,3,4,5,5,7]
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,7,7]
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm6, %ymm7, %ymm19
+; AVX512DQ-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512DQ-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm7 # 16-byte Folded Reload
+; AVX512DQ-NEXT:    # xmm7 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm7[0,0,2,1,4,5,6,7]
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm7[0,2,2,3,4,5,6,7]
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm12, %ymm8, %ymm14
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm7[0,1,2,3,4,4,6,5]
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,6,7]
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm7, %ymm8, %ymm15
+; AVX512DQ-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512DQ-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm7 # 16-byte Folded Reload
+; AVX512DQ-NEXT:    # xmm7 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm7[0,0,2,1,4,5,6,7]
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm12 = xmm7[0,2,2,3,4,5,6,7]
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm12, %ymm8, %ymm17
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm8 = xmm7[0,1,2,3,4,4,6,5]
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,6,7]
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm7, %ymm8, %ymm20
+; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm1[8],xmm5[8],xmm1[9],xmm5[9],xmm1[10],xmm5[10],xmm1[11],xmm5[11],xmm1[12],xmm5[12],xmm1[13],xmm5[13],xmm1[14],xmm5[14],xmm1[15],xmm5[15]
 ; AVX512DQ-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
 ; AVX512DQ-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm1 # 16-byte Folded Reload
 ; AVX512DQ-NEXT:    # xmm1 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm1[0,1,1,3,4,5,6,7]
-; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm10 = xmm1[2,1,3,3,4,5,6,7]
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm10, %ymm0, %ymm0
-; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm10 = xmm1[0,1,2,3,4,5,5,7]
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm7 = xmm1[0,1,1,3,4,5,6,7]
+; AVX512DQ-NEXT:    vpshuflw {{.*#+}} xmm8 = xmm1[2,1,3,3,4,5,6,7]
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm8, %ymm7, %ymm8
+; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,4,5,5,7]
 ; AVX512DQ-NEXT:    vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,7,7]
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm1, %ymm10, %ymm1
-; AVX512DQ-NEXT:    vmovdqa 16(%rsi), %xmm10
-; AVX512DQ-NEXT:    vmovdqa 16(%rdi), %xmm13
-; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm12 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3],xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7]
-; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm10 = xmm13[8],xmm10[8],xmm13[9],xmm10[9],xmm13[10],xmm10[10],xmm13[11],xmm10[11],xmm13[12],xmm10[12],xmm13[13],xmm10[13],xmm13[14],xmm10[14],xmm13[15],xmm10[15]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm13 = xmm5[2,3,2,3]
-; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm13 = xmm13[0],zero,zero,zero,xmm13[1],zero,zero,zero
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm1, %ymm7, %ymm1
+; AVX512DQ-NEXT:    vmovdqa 16(%rsi), %xmm7
+; AVX512DQ-NEXT:    vmovdqa 16(%rdi), %xmm12
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm12[0],xmm7[0],xmm12[1],xmm7[1],xmm12[2],xmm7[2],xmm12[3],xmm7[3],xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7]
+; AVX512DQ-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm12[8],xmm7[8],xmm12[9],xmm7[9],xmm12[10],xmm7[10],xmm12[11],xmm7[11],xmm12[12],xmm7[12],xmm12[13],xmm7[13],xmm12[14],xmm7[14],xmm12[15],xmm7[15]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm12 = xmm5[2,3,2,3]
+; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm12 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm23 = xmm5[3,3,3,3]
 ; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm23 = xmm23[0],zero,zero,zero,xmm23[1],zero,zero,zero
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm23, %ymm13, %ymm13
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm23, %ymm12, %ymm12
 ; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm23 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[1,1,1,1]
 ; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero
 ; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm5, %ymm23, %ymm5
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm13, %zmm5, %zmm13
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm12, %zmm5, %zmm12
 ; AVX512DQ-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5 # 32-byte Folded Reload
 ; AVX512DQ-NEXT:    # ymm5 = mem[0,1,1,3,4,5,5,7]
 ; AVX512DQ-NEXT:    vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
 ; AVX512DQ-NEXT:    # ymm23 = mem[2,1,3,3,6,5,7,7]
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm23, %zmm5, %zmm5
-; AVX512DQ-NEXT:    vpternlogq $184, %zmm13, %zmm3, %zmm5
-; AVX512DQ-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX512DQ-NEXT:    # ymm13 = mem[0,0,2,1,4,4,6,5]
+; AVX512DQ-NEXT:    vpternlogq $184, %zmm12, %zmm13, %zmm5
+; AVX512DQ-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512DQ-NEXT:    # ymm12 = mem[0,0,2,1,4,4,6,5]
 ; AVX512DQ-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
 ; AVX512DQ-NEXT:    # ymm23 = mem[0,2,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm23, %zmm13, %zmm13
-; AVX512DQ-NEXT:    vpandnq %zmm13, %zmm8, %zmm13
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm23, %zmm12, %zmm12
+; AVX512DQ-NEXT:    vpandnq %zmm12, %zmm3, %zmm12
 ; AVX512DQ-NEXT:    vpshufd $96, {{[-0-9]+}}(%r{{[sb]}}p), %ymm23 # 32-byte Folded Reload
 ; AVX512DQ-NEXT:    # ymm23 = mem[0,0,2,1,4,4,6,5]
 ; AVX512DQ-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm24 # 32-byte Folded Reload
 ; AVX512DQ-NEXT:    # ymm24 = mem[0,2,2,3,4,6,6,7]
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm24, %zmm23, %zmm23
-; AVX512DQ-NEXT:    vpandq %zmm8, %zmm23, %zmm23
-; AVX512DQ-NEXT:    vpord %zmm13, %zmm23, %zmm5 {%k1}
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm13 = xmm12[2,3,2,3]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm23 = xmm12[3,3,3,3]
-; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm24 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm12 = xmm12[1,1,1,1]
+; AVX512DQ-NEXT:    vpandq %zmm3, %zmm23, %zmm23
+; AVX512DQ-NEXT:    vpord %zmm12, %zmm23, %zmm5 {%k1}
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm12 = xmm0[2,3,2,3]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm23 = xmm0[3,3,3,3]
+; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm24 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
 ; AVX512DQ-NEXT:    vmovdqa64 %xmm25, %xmm2
-; AVX512DQ-NEXT:    vmovdqa64 %xmm27, %xmm7
-; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3],xmm2[4],xmm7[4],xmm2[5],xmm7[5],xmm2[6],xmm7[6],xmm2[7],xmm7[7]
-; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm13 = xmm13[0],zero,zero,zero,xmm13[1],zero,zero,zero
-; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm23 = xmm23[0],zero,zero,zero,xmm23[1],zero,zero,zero
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm23, %ymm13, %ymm13
+; AVX512DQ-NEXT:    vmovdqa64 %xmm27, %xmm6
+; AVX512DQ-NEXT:    vpunpcklbw {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1],xmm2[2],xmm6[2],xmm2[3],xmm6[3],xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
 ; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm12 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm12, %ymm24, %ymm12
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm23 = xmm10[2,3,2,3]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm24 = xmm10[3,3,3,3]
-; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm29 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm10 = xmm10[1,1,1,1]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm13, %zmm12, %zmm12
-; AVX512DQ-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm13 # 32-byte Folded Reload
-; AVX512DQ-NEXT:    # ymm13 = mem[0,1,1,3,4,5,5,7]
+; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm23 = xmm23[0],zero,zero,zero,xmm23[1],zero,zero,zero
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm23, %ymm12, %ymm12
+; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm0, %ymm24, %ymm0
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm23 = xmm7[2,3,2,3]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm24 = xmm7[3,3,3,3]
+; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm29 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm7 = xmm7[1,1,1,1]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm12, %zmm0, %zmm0
+; AVX512DQ-NEXT:    vpshufd $212, {{[-0-9]+}}(%r{{[sb]}}p), %ymm12 # 32-byte Folded Reload
+; AVX512DQ-NEXT:    # ymm12 = mem[0,1,1,3,4,5,5,7]
 ; AVX512DQ-NEXT:    vpshufd $246, {{[-0-9]+}}(%r{{[sb]}}p), %ymm30 # 32-byte Folded Reload
 ; AVX512DQ-NEXT:    # ymm30 = mem[2,1,3,3,6,5,7,7]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm30, %zmm13, %zmm13
-; AVX512DQ-NEXT:    vpternlogq $184, %zmm12, %zmm3, %zmm13
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm12 = xmm2[2,3,2,3]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm30, %zmm12, %zmm12
+; AVX512DQ-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm12
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm30 = xmm2[3,3,3,3]
 ; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm31 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[1,1,1,1]
@@ -8282,57 +8290,57 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-NEXT:    vpshufd $232, {{[-0-9]+}}(%r{{[sb]}}p), %ymm16 # 32-byte Folded Reload
 ; AVX512DQ-NEXT:    # ymm16 = mem[0,2,2,3,4,6,6,7]
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm16, %zmm27, %zmm16
-; AVX512DQ-NEXT:    vpandnq %zmm25, %zmm8, %zmm25
-; AVX512DQ-NEXT:    vpandq %zmm8, %zmm16, %zmm16
-; AVX512DQ-NEXT:    vpord %zmm25, %zmm16, %zmm13 {%k1}
+; AVX512DQ-NEXT:    vpandnq %zmm25, %zmm3, %zmm25
+; AVX512DQ-NEXT:    vpandq %zmm3, %zmm16, %zmm16
+; AVX512DQ-NEXT:    vpord %zmm25, %zmm16, %zmm12 {%k1}
 ; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm16 = xmm23[0],zero,zero,zero,xmm23[1],zero,zero,zero
 ; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm23 = xmm24[0],zero,zero,zero,xmm24[1],zero,zero,zero
 ; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm23, %ymm16, %ymm16
-; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero
-; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm10, %ymm29, %ymm10
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm16, %zmm10, %zmm10
+; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero
+; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm7, %ymm29, %ymm7
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm16, %zmm7, %zmm7
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm16 = ymm28[0,1,1,3,4,5,5,7]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm18 = ymm18[2,1,3,3,6,5,7,7]
 ; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm18, %zmm16, %zmm16
-; AVX512DQ-NEXT:    vpternlogq $184, %zmm10, %zmm3, %zmm16
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm10 = ymm21[0,0,2,1,4,4,6,5]
+; AVX512DQ-NEXT:    vpternlogq $184, %zmm7, %zmm13, %zmm16
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm7 = ymm21[0,0,2,1,4,4,6,5]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm18 = ymm22[0,2,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm18, %zmm10, %zmm10
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm18, %zmm7, %zmm7
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm18 = ymm26[0,0,2,1,4,4,6,5]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm7 = ymm19[0,2,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm7, %zmm18, %zmm7
-; AVX512DQ-NEXT:    vpandnq %zmm10, %zmm8, %zmm10
-; AVX512DQ-NEXT:    vpandq %zmm8, %zmm7, %zmm7
-; AVX512DQ-NEXT:    vpord %zmm10, %zmm7, %zmm16 {%k1}
-; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm7 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero
-; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm30[0],zero,zero,zero,xmm30[1],zero,zero,zero
-; AVX512DQ-NEXT:    vinserti128 $1, %xmm10, %ymm7, %ymm7
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm6 = ymm19[0,2,2,3,4,6,6,7]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm18, %zmm6
+; AVX512DQ-NEXT:    vpandnq %zmm7, %zmm3, %zmm7
+; AVX512DQ-NEXT:    vpandq %zmm3, %zmm6, %zmm6
+; AVX512DQ-NEXT:    vpord %zmm7, %zmm6, %zmm16 {%k1}
+; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm6 = xmm30[0],zero,zero,zero,xmm30[1],zero,zero,zero
+; AVX512DQ-NEXT:    vinserti128 $1, %xmm6, %ymm0, %ymm0
 ; AVX512DQ-NEXT:    vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
 ; AVX512DQ-NEXT:    vinserti32x4 $1, %xmm2, %ymm31, %ymm2
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm7, %zmm2, %zmm2
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm7 = ymm14[0,1,1,3,4,5,5,7]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm10 = ymm15[2,1,3,3,6,5,7,7]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm10, %zmm7, %zmm7
-; AVX512DQ-NEXT:    vpternlogq $184, %zmm2, %zmm3, %zmm7
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm2 = ymm17[0,0,2,1,4,4,6,5]
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm3 = ymm20[0,2,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
-; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,0,2,1,4,4,6,5]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm0, %zmm2, %zmm0
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm2 = ymm14[0,1,1,3,4,5,5,7]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm6 = ymm15[2,1,3,3,6,5,7,7]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm2, %zmm2
+; AVX512DQ-NEXT:    vpternlogq $184, %zmm0, %zmm13, %zmm2
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm0 = ymm17[0,0,2,1,4,4,6,5]
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm6 = ymm20[0,2,2,3,4,6,6,7]
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm6 = ymm8[0,0,2,1,4,4,6,5]
 ; AVX512DQ-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
-; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512DQ-NEXT:    vpandnq %zmm2, %zmm8, %zmm1
-; AVX512DQ-NEXT:    vpandq %zmm8, %zmm0, %zmm0
-; AVX512DQ-NEXT:    vpord %zmm1, %zmm0, %zmm7 {%k1}
+; AVX512DQ-NEXT:    vinserti64x4 $1, %ymm1, %zmm6, %zmm1
+; AVX512DQ-NEXT:    vpandnq %zmm0, %zmm3, %zmm0
+; AVX512DQ-NEXT:    vpandq %zmm3, %zmm1, %zmm1
+; AVX512DQ-NEXT:    vpord %zmm0, %zmm1, %zmm2 {%k1}
 ; AVX512DQ-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-NEXT:    vmovdqa64 %zmm7, (%rax)
+; AVX512DQ-NEXT:    vmovdqa64 %zmm2, (%rax)
 ; AVX512DQ-NEXT:    vmovdqa64 %zmm16, 192(%rax)
-; AVX512DQ-NEXT:    vmovdqa64 %zmm13, 128(%rax)
+; AVX512DQ-NEXT:    vmovdqa64 %zmm12, 128(%rax)
 ; AVX512DQ-NEXT:    vmovdqa64 %zmm5, 320(%rax)
 ; AVX512DQ-NEXT:    vmovdqa64 %zmm4, 256(%rax)
 ; AVX512DQ-NEXT:    vmovdqa64 %zmm11, 448(%rax)
 ; AVX512DQ-NEXT:    vmovdqa64 %zmm9, 384(%rax)
-; AVX512DQ-NEXT:    vmovdqa64 %zmm6, 64(%rax)
-; AVX512DQ-NEXT:    addq $648, %rsp # imm = 0x288
+; AVX512DQ-NEXT:    vmovdqa64 %zmm10, 64(%rax)
+; AVX512DQ-NEXT:    addq $680, %rsp # imm = 0x2A8
 ; AVX512DQ-NEXT:    vzeroupper
 ; AVX512DQ-NEXT:    retq
 ;
@@ -8494,22 +8502,22 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-FCP-NEXT:    vmovdqa (%rsi), %xmm12
 ; AVX512DQ-FCP-NEXT:    vmovdqa (%rdi), %xmm9
 ; AVX512DQ-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm9[8],xmm12[8],xmm9[9],xmm12[9],xmm9[10],xmm12[10],xmm9[11],xmm12[11],xmm9[12],xmm12[12],xmm9[13],xmm12[13],xmm9[14],xmm12[14],xmm9[15],xmm12[15]
-; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm7 = [4,5,u,u,u,u,u,u,6,7,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} xmm5 = [4,5,u,u,u,u,u,u,6,7,u,u,u,u,u,u]
 ; AVX512DQ-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm7, %xmm0, %xmm2
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm5, %xmm0, %xmm2
 ; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
 ; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm5 = [8,9,u,u,u,u,u,u,10,11,u,u,u,u,u,u,12,13,u,u,u,u,u,u,14,15,u,u,u,u,u,u]
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm5, %ymm0, %ymm0
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm4
-; AVX512DQ-FCP-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535]
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm4 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT:    vpbroadcastq {{.*#+}} zmm0 = [65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0]
-; AVX512DQ-FCP-NEXT:    vpandnq %zmm19, %zmm0, %zmm19
-; AVX512DQ-FCP-NEXT:    vpandq %zmm0, %zmm21, %zmm21
+; AVX512DQ-FCP-NEXT:    vmovdqa {{.*#+}} ymm4 = [8,9,u,u,u,u,u,u,10,11,u,u,u,u,u,u,12,13,u,u,u,u,u,u,14,15,u,u,u,u,u,u]
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm3
+; AVX512DQ-FCP-NEXT:    vpbroadcastq {{.*#+}} zmm7 = [65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535]
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm3 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT:    vpbroadcastq {{.*#+}} zmm2 = [65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0,65535,65535,65535,0]
+; AVX512DQ-FCP-NEXT:    vpandnq %zmm19, %zmm2, %zmm19
+; AVX512DQ-FCP-NEXT:    vpandq %zmm2, %zmm21, %zmm21
 ; AVX512DQ-FCP-NEXT:    movw $-21846, %ax # imm = 0xAAAA
 ; AVX512DQ-FCP-NEXT:    kmovw %eax, %k1
-; AVX512DQ-FCP-NEXT:    vpord %zmm19, %zmm21, %zmm4 {%k1}
+; AVX512DQ-FCP-NEXT:    vpord %zmm19, %zmm21, %zmm3 {%k1}
 ; AVX512DQ-FCP-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm15, %xmm15 # 16-byte Folded Reload
 ; AVX512DQ-FCP-NEXT:    # xmm15 = xmm15[0],mem[0],xmm15[1],mem[1],xmm15[2],mem[2],xmm15[3],mem[3],xmm15[4],mem[4],xmm15[5],mem[5],xmm15[6],mem[6],xmm15[7],mem[7]
 ; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm15, %ymm15, %ymm15
@@ -8519,110 +8527,110 @@ define void @store_i8_stride8_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
 ; AVX512DQ-FCP-NEXT:    vmovdqa 48(%rsi), %xmm15
 ; AVX512DQ-FCP-NEXT:    vmovdqa 48(%rdi), %xmm8
 ; AVX512DQ-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm8[0],xmm15[0],xmm8[1],xmm15[1],xmm8[2],xmm15[2],xmm8[3],xmm15[3],xmm8[4],xmm15[4],xmm8[5],xmm15[5],xmm8[6],xmm15[6],xmm8[7],xmm15[7]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm7, %xmm6, %xmm1
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm5, %xmm6, %xmm0
 ; AVX512DQ-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm21 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero
-; AVX512DQ-FCP-NEXT:    vinserti32x4 $1, %xmm1, %ymm21, %ymm1
+; AVX512DQ-FCP-NEXT:    vinserti32x4 $1, %xmm0, %ymm21, %ymm0
 ; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm6, %ymm6, %ymm6
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm5, %ymm6, %ymm6
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm6
-; AVX512DQ-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm8[8],xmm15[8],xmm8[9],xmm15[9],xmm8[10],xmm15[10],xmm8[11],xmm15[11],xmm8[12],xmm15[12],xmm8[13],xmm15[13],xmm8[14],xmm15[14],xmm8[15],xmm15[15]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm7, %xmm1, %xmm8
-; AVX512DQ-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm15 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm4, %ymm6, %ymm6
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm6
+; AVX512DQ-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm8[8],xmm15[8],xmm8[9],xmm15[9],xmm8[10],xmm15[10],xmm8[11],xmm15[11],xmm8[12],xmm15[12],xmm8[13],xmm15[13],xmm8[14],xmm15[14],xmm8[15],xmm15[15]
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm5, %xmm0, %xmm8
+; AVX512DQ-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm15 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm8, %ymm15, %ymm8
-; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm15
-; AVX512DQ-FCP-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm1 # 16-byte Folded Reload
-; AVX512DQ-FCP-NEXT:    # xmm1 = xmm14[0],mem[0],xmm14[1],mem[1],xmm14[2],mem[2],xmm14[3],mem[3],xmm14[4],mem[4],xmm14[5],mem[5],xmm14[6],mem[6],xmm14[7],mem[7]
-; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm11, %ymm1, %ymm8
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm10, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm8, %zmm1, %zmm13
-; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rsi), %xmm1
+; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm15
+; AVX512DQ-FCP-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm14, %xmm0 # 16-byte Folded Reload
+; AVX512DQ-FCP-NEXT:    # xmm0 = xmm14[0],mem[0],xmm14[1],mem[1],xmm14[2],mem[2],xmm14[3],mem[3],xmm14[4],mem[4],xmm14[5],mem[5],xmm14[6],mem[6],xmm14[7],mem[7]
+; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm11, %ymm0, %ymm8
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm10, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm13
+; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rsi), %xmm0
 ; AVX512DQ-FCP-NEXT:    vmovdqa 32(%rdi), %xmm8
-; AVX512DQ-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3],xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm7, %xmm11, %xmm14
+; AVX512DQ-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm11 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3],xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm5, %xmm11, %xmm14
 ; AVX512DQ-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm21 = xmm11[0],zero,zero,zero,xmm11[1],zero,zero,zero
 ; AVX512DQ-FCP-NEXT:    vinserti32x4 $1, %xmm14, %ymm21, %ymm14
 ; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm11, %ymm11, %ymm11
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm5, %ymm11, %ymm11
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm4, %ymm11, %ymm11
 ; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm11, %zmm14, %zmm11
-; AVX512DQ-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm8[8],xmm1[8],xmm8[9],xmm1[9],xmm8[10],xmm1[10],xmm8[11],xmm1[11],xmm8[12],xmm1[12],xmm8[13],xmm1[13],xmm8[14],xmm1[14],xmm8[15],xmm1[15]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm7, %xmm1, %xmm8
-; AVX512DQ-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm14 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
+; AVX512DQ-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15]
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm5, %xmm0, %xmm8
+; AVX512DQ-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm14 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
 ; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm8, %ymm14, %ymm8
-; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm14
-; AVX512DQ-FCP-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; AVX512DQ-FCP-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
-; AVX512DQ-FCP-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7]
-; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm17, %ymm2
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm2, %ymm1, %ymm8
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm18, %ymm2
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm2, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm8, %zmm1, %zmm21
+; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm8, %zmm14
+; AVX512DQ-FCP-NEXT:    vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512DQ-FCP-NEXT:    vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512DQ-FCP-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7]
+; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm17, %ymm1
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm1, %ymm0, %ymm8
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %ymm18, %ymm1
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm8, %zmm0, %zmm21
 ; AVX512DQ-FCP-NEXT:    vmovdqa 16(%rsi), %xmm8
 ; AVX512DQ-FCP-NEXT:    vmovdqa 16(%rdi), %xmm10
-; AVX512DQ-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3],xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm7, %xmm1, %xmm2
-; AVX512DQ-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm17 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX512DQ-FCP-NEXT:    vinserti32x4 $1, %xmm2, %ymm17, %ymm2
-; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm5, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512DQ-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm7, %xmm2, %xmm8
-; AVX512DQ-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
+; AVX512DQ-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm0 = xmm10[0],xmm8[0],xmm10[1],xmm8[1],xmm10[2],xmm8[2],xmm10[3],xmm8[3],xmm10[4],xmm8[4],xmm10[5],xmm8[5],xmm10[6],xmm8[6],xmm10[7],xmm8[7]
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm5, %xmm0, %xmm1
+; AVX512DQ-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm17 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX512DQ-FCP-NEXT:    vinserti32x4 $1, %xmm1, %ymm17, %ymm1
+; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm4, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm10[8],xmm8[8],xmm10[9],xmm8[9],xmm10[10],xmm8[10],xmm10[11],xmm8[11],xmm10[12],xmm8[12],xmm10[13],xmm8[13],xmm10[14],xmm8[14],xmm10[15],xmm8[15]
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm5, %xmm1, %xmm8
+; AVX512DQ-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm10 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
 ; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm8, %ymm10, %ymm8
-; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm2, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm5, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm2, %zmm8, %zmm2
+; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm4, %ymm1, %ymm1
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm1, %zmm8, %zmm1
 ; AVX512DQ-FCP-NEXT:    vpunpcklbw {{.*#+}} xmm8 = xmm9[0],xmm12[0],xmm9[1],xmm12[1],xmm9[2],xmm12[2],xmm9[3],xmm12[3],xmm9[4],xmm12[4],xmm9[5],xmm12[5],xmm9[6],xmm12[6],xmm9[7],xmm12[7]
 ; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm8, %ymm8, %ymm9
-; AVX512DQ-FCP-NEXT:    vpshufb %ymm5, %ymm9, %ymm5
-; AVX512DQ-FCP-NEXT:    vpshufb %xmm7, %xmm8, %xmm7
+; AVX512DQ-FCP-NEXT:    vpshufb %ymm4, %ymm9, %ymm4
+; AVX512DQ-FCP-NEXT:    vpshufb %xmm5, %xmm8, %xmm5
 ; AVX512DQ-FCP-NEXT:    vpmovzxwq {{.*#+}} xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero
-; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm7, %ymm8, %ymm7
-; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm5
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm6 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT:    vpandnq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm7 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT:    vpandq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT:    vpord %zmm7, %zmm8, %zmm6 {%k1}
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm3, %zmm15 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT:    vpandnq (%rsp), %zmm0, %zmm7 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT:    vpandq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm8 # 64-byte Folded Reload
-; AVX512DQ-FCP-NEXT:    vpord %zmm7, %zmm8, %zmm15 {%k1}
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm20, %zmm3, %zmm11
-; AVX512DQ-FCP-NEXT:    vpandnq %zmm22, %zmm0, %zmm7
-; AVX512DQ-FCP-NEXT:    vpandq %zmm0, %zmm23, %zmm8
-; AVX512DQ-FCP-NEXT:    vpord %zmm7, %zmm8, %zmm11 {%k1}
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm24, %zmm3, %zmm14
-; AVX512DQ-FCP-NEXT:    vpandnq %zmm25, %zmm0, %zmm7
-; AVX512DQ-FCP-NEXT:    vpandq %zmm0, %zmm26, %zmm8
-; AVX512DQ-FCP-NEXT:    vpord %zmm7, %zmm8, %zmm14 {%k1}
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm27, %zmm3, %zmm1
-; AVX512DQ-FCP-NEXT:    vpandnq %zmm28, %zmm0, %zmm7
-; AVX512DQ-FCP-NEXT:    vpandq %zmm0, %zmm29, %zmm8
-; AVX512DQ-FCP-NEXT:    vpord %zmm7, %zmm8, %zmm1 {%k1}
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm30, %zmm3, %zmm2
-; AVX512DQ-FCP-NEXT:    vpandnq %zmm31, %zmm0, %zmm7
-; AVX512DQ-FCP-NEXT:    vpandq %zmm0, %zmm16, %zmm8
-; AVX512DQ-FCP-NEXT:    vpord %zmm7, %zmm8, %zmm2 {%k1}
-; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm19, %zmm3, %zmm5
-; AVX512DQ-FCP-NEXT:    vpandnq %zmm13, %zmm0, %zmm3
-; AVX512DQ-FCP-NEXT:    vpandq %zmm0, %zmm21, %zmm0
-; AVX512DQ-FCP-NEXT:    vpord %zmm3, %zmm0, %zmm5 {%k1}
+; AVX512DQ-FCP-NEXT:    vinserti128 $1, %xmm5, %ymm8, %ymm5
+; AVX512DQ-FCP-NEXT:    vinserti64x4 $1, %ymm4, %zmm5, %zmm4
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm6 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT:    vpandnq {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm5 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT:    vpandq {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm8 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT:    vpord %zmm5, %zmm8, %zmm6 {%k1}
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, {{[-0-9]+}}(%r{{[sb]}}p), %zmm7, %zmm15 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT:    vpandnq (%rsp), %zmm2, %zmm5 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT:    vpandq {{[-0-9]+}}(%r{{[sb]}}p), %zmm2, %zmm8 # 64-byte Folded Reload
+; AVX512DQ-FCP-NEXT:    vpord %zmm5, %zmm8, %zmm15 {%k1}
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm20, %zmm7, %zmm11
+; AVX512DQ-FCP-NEXT:    vpandnq %zmm22, %zmm2, %zmm5
+; AVX512DQ-FCP-NEXT:    vpandq %zmm2, %zmm23, %zmm8
+; AVX512DQ-FCP-NEXT:    vpord %zmm5, %zmm8, %zmm11 {%k1}
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm24, %zmm7, %zmm14
+; AVX512DQ-FCP-NEXT:    vpandnq %zmm25, %zmm2, %zmm5
+; AVX512DQ-FCP-NEXT:    vpandq %zmm2, %zmm26, %zmm8
+; AVX512DQ-FCP-NEXT:    vpord %zmm5, %zmm8, %zmm14 {%k1}
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm27, %zmm7, %zmm0
+; AVX512DQ-FCP-NEXT:    vpandnq %zmm28, %zmm2, %zmm5
+; AVX512DQ-FCP-NEXT:    vpandq %zmm2, %zmm29, %zmm8
+; AVX512DQ-FCP-NEXT:    vpord %zmm5, %zmm8, %zmm0 {%k1}
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm30, %zmm7, %zmm1
+; AVX512DQ-FCP-NEXT:    vpandnq %zmm31, %zmm2, %zmm5
+; AVX512DQ-FCP-NEXT:    vpandq %zmm2, %zmm16, %zmm8
+; AVX512DQ-FCP-NEXT:    vpord %zmm5, %zmm8, %zmm1 {%k1}
+; AVX512DQ-FCP-NEXT:    vpternlogq $226, %zmm19, %zmm7, %zmm4
+; AVX512DQ-FCP-NEXT:    vpandnq %zmm13, %zmm2, %zmm5
+; AVX512DQ-FCP-NEXT:    vpandq %zmm2, %zmm21, %zmm2
+; AVX512DQ-FCP-NEXT:    vpord %zmm5, %zmm2, %zmm4 {%k1}
 ; AVX512DQ-FCP-NEXT:    movq {{[0-9]+}}(%rsp), %rax
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm5, (%rax)
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm2, 192(%rax)
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, 128(%rax)
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm4, (%rax)
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm1, 192(%rax)
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm0, 128(%rax)
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm14, 320(%rax)
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm11, 256(%rax)
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm15, 448(%rax)
 ; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm6, 384(%rax)
-; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm4, 64(%rax)
+; AVX512DQ-FCP-NEXT:    vmovdqa64 %zmm3, 64(%rax)
 ; AVX512DQ-FCP-NEXT:    addq $392, %rsp # imm = 0x188
 ; AVX512DQ-FCP-NEXT:    vzeroupper
 ; AVX512DQ-FCP-NEXT:    retq

>From a9b06642774bb522cf5dd426af982b7bebfb1b9b Mon Sep 17 00:00:00 2001
From: Shengchen Kan <shengchen.kan at intel.com>
Date: Wed, 31 Jan 2024 00:25:17 +0800
Subject: [PATCH 2/2] Rebase and address review comments

---
 llvm/lib/Target/X86/X86InstrFoldTables.cpp |  4 +-
 llvm/lib/Target/X86/X86InstrInfo.cpp       | 77 +++++-----------------
 llvm/lib/Target/X86/X86InstrInfo.h         |  3 +-
 3 files changed, 21 insertions(+), 63 deletions(-)

diff --git a/llvm/lib/Target/X86/X86InstrFoldTables.cpp b/llvm/lib/Target/X86/X86InstrFoldTables.cpp
index cfea8acecc026..560b8c378ead7 100644
--- a/llvm/lib/Target/X86/X86InstrFoldTables.cpp
+++ b/llvm/lib/Target/X86/X86InstrFoldTables.cpp
@@ -143,8 +143,8 @@ const X86FoldTableEntry *llvm::lookupFoldTable(unsigned RegOp, unsigned OpNum) {
   return lookupFoldTableImpl(FoldTable, RegOp);
 }
 
-const X86FoldTableEntry *
-llvm::lookupBroadcastFoldTable(unsigned RegOp, unsigned OpNum) {
+const X86FoldTableEntry *llvm::lookupBroadcastFoldTable(unsigned RegOp,
+                                                        unsigned OpNum) {
   ArrayRef<X86FoldTableEntry> FoldTable;
   if (OpNum == 1)
     FoldTable = ArrayRef(BroadcastTable1);
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 2f399a1f40848..5609272cf935c 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -8104,7 +8104,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
   MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands,          \
              LoadMI.operands_begin() + NumOps);                                \
   return foldMemoryBroadcast(MF, MI, Ops[0], MOs, InsertPt, /*Size=*/SIZE,     \
-                             Alignment, /*AllowCommute=*/true);
+                             /*AllowCommute=*/true);
   case X86::VPBROADCASTWZ128rm:
   case X86::VPBROADCASTWZ256rm:
   case X86::VPBROADCASTWZrm:
@@ -8136,75 +8136,34 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
                                /*Size=*/0, Alignment, /*AllowCommute=*/true);
 }
 
-MachineInstr *X86InstrInfo::foldMemoryBroadcast(
-    MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
-    ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
-    unsigned BitsSize, Align Alignment, bool AllowCommute) const {
+MachineInstr *
+X86InstrInfo::foldMemoryBroadcast(MachineFunction &MF, MachineInstr &MI,
+                                  unsigned OpNum, ArrayRef<MachineOperand> MOs,
+                                  MachineBasicBlock::iterator InsertPt,
+                                  unsigned BitsSize, bool AllowCommute) const {
 
   if (auto *I = lookupBroadcastFoldTable(MI.getOpcode(), OpNum))
     return matchBroadcastSize(*I, BitsSize)
                ? FuseInst(MF, I->DstOp, OpNum, MOs, InsertPt, MI, *this)
                : nullptr;
 
-  // TODO: Share code with foldMemoryOperandImpl for the commute
   if (AllowCommute) {
-    unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex;
-    if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) {
-      bool HasDef = MI.getDesc().getNumDefs();
-      Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
-      Register Reg1 = MI.getOperand(CommuteOpIdx1).getReg();
-      Register Reg2 = MI.getOperand(CommuteOpIdx2).getReg();
-      bool Tied1 =
-          0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO);
-      bool Tied2 =
-          0 == MI.getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO);
-
-      // If either of the commutable operands are tied to the destination
-      // then we can not commute + fold.
-      if ((HasDef && Reg0 == Reg1 && Tied1) ||
-          (HasDef && Reg0 == Reg2 && Tied2))
-        return nullptr;
-
-      MachineInstr *CommutedMI =
-          commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
-      if (!CommutedMI) {
-        // Unable to commute.
-        return nullptr;
-      }
-      if (CommutedMI != &MI) {
-        // New instruction. We can't fold from this.
-        CommutedMI->eraseFromParent();
-        return nullptr;
-      }
-
-      // Attempt to fold with the commuted version of the instruction.
-      MachineInstr *NewMI = foldMemoryBroadcast(MF, MI, CommuteOpIdx2, MOs,
-                                                InsertPt, BitsSize, Alignment,
-                                                /*AllowCommute=*/false);
-      if (NewMI)
-        return NewMI;
-
-      // Folding failed again - undo the commute before returning.
-      MachineInstr *UncommutedMI =
-          commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
-      if (!UncommutedMI) {
-        // Unable to commute.
-        return nullptr;
-      }
-      if (UncommutedMI != &MI) {
-        // New instruction. It doesn't need to be kept.
-        UncommutedMI->eraseFromParent();
-        return nullptr;
-      }
-
-      // Return here to prevent duplicate fuse failure report.
+    // If the instruction and target operand are commutable, commute the
+    // instruction and try again.
+    unsigned CommuteOpIdx2 = commuteOperandsForFold(MI, OpNum);
+    if (CommuteOpIdx2 == OpNum) {
+      printFailMsgforFold(MI, OpNum);
       return nullptr;
     }
+    MachineInstr *NewMI =
+        foldMemoryBroadcast(MF, MI, CommuteOpIdx2, MOs, InsertPt, BitsSize,
+                            /*AllowCommute=*/false);
+    if (NewMI)
+      return NewMI;
+    UndoCommuteForFold(MI, OpNum, CommuteOpIdx2);
   }
 
-  // No fusion
-  if (PrintFailedFusing && !MI.isCopy())
-    dbgs() << "We failed to fuse operand " << OpNum << " in " << MI;
+  printFailMsgforFold(MI, OpNum);
   return nullptr;
 }
 
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index 957d7ce63a5c2..c0770a3ff1ffe 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -647,8 +647,7 @@ class X86InstrInfo final : public X86GenInstrInfo {
                                     unsigned OpNum,
                                     ArrayRef<MachineOperand> MOs,
                                     MachineBasicBlock::iterator InsertPt,
-                                    unsigned BitsSize, Align Alignment,
-                                    bool AllowCommute) const;
+                                    unsigned BitsSize, bool AllowCommute) const;
 
   /// isFrameOperand - Return true and the FrameIndex if the specified
   /// operand and follow operands form a reference to the stack frame.

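A note for readers of the second patch: the large deletion in
foldMemoryBroadcast above replaces open-coded commute-and-retry logic with
the commuteOperandsForFold / UndoCommuteForFold / printFailMsgforFold
helpers that foldMemoryOperandImpl already shares. The toy C++ model below
sketches that control flow only; every type and helper body in it is a
simplified stand-in inferred from the deleted lines, not LLVM's real
MachineInstr API.

#include <cstdio>
#include <utility>

struct Instr {
  unsigned Ops[3]; // Ops[0] is the def; Ops[1..2] are the sources.
  bool Commutable;
};

// Stand-in for the broadcast fold-table lookup; pretend only operand 2
// has a memory-folded form.
static bool tryFoldFromTable(const Instr &MI, unsigned OpNum) {
  return OpNum == 2;
}

// Stand-in for commuteInstruction: swap the two source operands in place.
static void commute(Instr &MI, unsigned A, unsigned B) {
  std::swap(MI.Ops[A], MI.Ops[B]);
}

// Mirrors the convention of commuteOperandsForFold in the patch: return the
// partner operand index on success, or OpNum itself when the instruction
// cannot be commuted at OpNum.
static unsigned commuteOperandsForFold(Instr &MI, unsigned OpNum) {
  if (!MI.Commutable)
    return OpNum;
  unsigned Other = OpNum == 1 ? 2u : 1u;
  commute(MI, OpNum, Other);
  return Other;
}

// The shape of the patched foldMemoryBroadcast: try the table, optionally
// commute and retry once, and undo the commute if the retry also fails.
static bool foldMemoryBroadcast(Instr &MI, unsigned OpNum, bool AllowCommute) {
  if (tryFoldFromTable(MI, OpNum))
    return true;
  if (AllowCommute) {
    unsigned CommuteOpIdx2 = commuteOperandsForFold(MI, OpNum);
    if (CommuteOpIdx2 == OpNum) {
      std::printf("failed to fuse operand %u\n", OpNum); // printFailMsgforFold
      return false;
    }
    if (foldMemoryBroadcast(MI, CommuteOpIdx2, /*AllowCommute=*/false))
      return true;
    commute(MI, OpNum, CommuteOpIdx2); // UndoCommuteForFold
  }
  std::printf("failed to fuse operand %u\n", OpNum); // printFailMsgforFold
  return false;
}

int main() {
  Instr MI = {{0, 1, 2}, /*Commutable=*/true};
  // Operand 1 has no folded form, but commuting lets operand 2's entry fire.
  std::printf("folded: %s\n", foldMemoryBroadcast(MI, 1, true) ? "yes" : "no");
  return 0;
}

Note the asymmetry the refactored code preserves: the retry after a commute
passes AllowCommute=false, so the fold is attempted at most twice and the
operands are restored to their original order if both attempts fail.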

